Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.14/0103-4.14.4-all-fixes.patch



Revision 3031
Wed Dec 13 13:38:00 2017 UTC by niro
File size: 116564 byte(s)
-linux-4.14.4
1 diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
2 index 07a250498fbb..f569db58f64a 100644
3 --- a/Documentation/devicetree/bindings/hwmon/jc42.txt
4 +++ b/Documentation/devicetree/bindings/hwmon/jc42.txt
5 @@ -34,6 +34,10 @@ Required properties:
6
7 - reg: I2C address
8
9 +Optional properties:
10 +- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
11 + This is not supported on all chips.
12 +
13 Example:
14
15 temp-sensor@1a {
16 diff --git a/Makefile b/Makefile
17 index ede4de0d8634..ba1648c093fe 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,7 +1,7 @@
21 # SPDX-License-Identifier: GPL-2.0
22 VERSION = 4
23 PATCHLEVEL = 14
24 -SUBLEVEL = 3
25 +SUBLEVEL = 4
26 EXTRAVERSION =
27 NAME = Petit Gorille
28
29 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
30 index 939b310913cf..3eb4397150df 100644
31 --- a/arch/arm64/Makefile
32 +++ b/arch/arm64/Makefile
33 @@ -77,9 +77,6 @@ endif
34
35 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
36 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
37 -ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
38 -KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o
39 -endif
40 endif
41
42 # Default value
43 diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
44 index 19bd97671bb8..4f766178fa6f 100644
45 --- a/arch/arm64/include/asm/module.h
46 +++ b/arch/arm64/include/asm/module.h
47 @@ -32,7 +32,7 @@ struct mod_arch_specific {
48 struct mod_plt_sec init;
49
50 /* for CONFIG_DYNAMIC_FTRACE */
51 - void *ftrace_trampoline;
52 + struct plt_entry *ftrace_trampoline;
53 };
54 #endif
55
56 @@ -45,4 +45,48 @@ extern u64 module_alloc_base;
57 #define module_alloc_base ((u64)_etext - MODULES_VSIZE)
58 #endif
59
60 +struct plt_entry {
61 + /*
62 + * A program that conforms to the AArch64 Procedure Call Standard
63 + * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
64 + * IP1 (x17) may be inserted at any branch instruction that is
65 + * exposed to a relocation that supports long branches. Since that
66 + * is exactly what we are dealing with here, we are free to use x16
67 + * as a scratch register in the PLT veneers.
68 + */
69 + __le32 mov0; /* movn x16, #0x.... */
70 + __le32 mov1; /* movk x16, #0x...., lsl #16 */
71 + __le32 mov2; /* movk x16, #0x...., lsl #32 */
72 + __le32 br; /* br x16 */
73 +};
74 +
75 +static inline struct plt_entry get_plt_entry(u64 val)
76 +{
77 + /*
78 + * MOVK/MOVN/MOVZ opcode:
79 + * +--------+------------+--------+-----------+-------------+---------+
80 + * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
81 + * +--------+------------+--------+-----------+-------------+---------+
82 + *
83 + * Rd := 0x10 (x16)
84 + * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
85 + * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
86 + * sf := 1 (64-bit variant)
87 + */
88 + return (struct plt_entry){
89 + cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
90 + cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
91 + cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
92 + cpu_to_le32(0xd61f0200)
93 + };
94 +}
95 +
96 +static inline bool plt_entries_equal(const struct plt_entry *a,
97 + const struct plt_entry *b)
98 +{
99 + return a->mov0 == b->mov0 &&
100 + a->mov1 == b->mov1 &&
101 + a->mov2 == b->mov2;
102 +}
103 +
104 #endif /* __ASM_MODULE_H */
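
Note: the struct plt_entry helpers moved into this header are shared between the module loader and ftrace. A minimal user-space sketch (the target address is made up for illustration; this is not kernel code) that derives the four veneer words the same way get_plt_entry() does:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000008100000ULL;	/* hypothetical branch target */
	uint32_t insn[4];

	insn[0] = 0x92800010 | (uint32_t)((~val & 0xffff) << 5);	/* movn x16, #lo16 */
	insn[1] = 0xf2a00010 | (uint32_t)(((val >> 16) & 0xffff) << 5);	/* movk x16, ..., lsl #16 */
	insn[2] = 0xf2c00010 | (uint32_t)(((val >> 32) & 0xffff) << 5);	/* movk x16, ..., lsl #32 */
	insn[3] = 0xd61f0200;						/* br x16 */

	/*
	 * MOVN writes the bitwise NOT of its immediate, so bits 63:48 of
	 * x16 end up all-ones -- exactly what a kernel (TTBR1) address
	 * needs, which is why no fourth MOVK is required.
	 */
	for (int i = 0; i < 4; i++)
		printf("0x%08x\n", insn[i]);
	return 0;
}
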
105 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
106 index 0029e13adb59..2f5ff2a65db3 100644
107 --- a/arch/arm64/kernel/Makefile
108 +++ b/arch/arm64/kernel/Makefile
109 @@ -63,6 +63,3 @@ extra-y += $(head-y) vmlinux.lds
110 ifeq ($(CONFIG_DEBUG_EFI),y)
111 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
112 endif
113 -
114 -# will be included by each individual module but not by the core kernel itself
115 -extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
116 diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
117 deleted file mode 100644
118 index 00c4025be4ff..000000000000
119 --- a/arch/arm64/kernel/ftrace-mod.S
120 +++ /dev/null
121 @@ -1,18 +0,0 @@
122 -/*
123 - * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
124 - *
125 - * This program is free software; you can redistribute it and/or modify
126 - * it under the terms of the GNU General Public License version 2 as
127 - * published by the Free Software Foundation.
128 - */
129 -
130 -#include <linux/linkage.h>
131 -#include <asm/assembler.h>
132 -
133 - .section ".text.ftrace_trampoline", "ax"
134 - .align 3
135 -0: .quad 0
136 -__ftrace_trampoline:
137 - ldr x16, 0b
138 - br x16
139 -ENDPROC(__ftrace_trampoline)
140 diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
141 index c13b1fca0e5b..50986e388d2b 100644
142 --- a/arch/arm64/kernel/ftrace.c
143 +++ b/arch/arm64/kernel/ftrace.c
144 @@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
145
146 if (offset < -SZ_128M || offset >= SZ_128M) {
147 #ifdef CONFIG_ARM64_MODULE_PLTS
148 - unsigned long *trampoline;
149 + struct plt_entry trampoline;
150 struct module *mod;
151
152 /*
153 @@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
154 * is added in the future, but for now, the pr_err() below
155 * deals with a theoretical issue only.
156 */
157 - trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
158 - if (trampoline[0] != addr) {
159 - if (trampoline[0] != 0) {
160 + trampoline = get_plt_entry(addr);
161 + if (!plt_entries_equal(mod->arch.ftrace_trampoline,
162 + &trampoline)) {
163 + if (!plt_entries_equal(mod->arch.ftrace_trampoline,
164 + &(struct plt_entry){})) {
165 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
166 return -EINVAL;
167 }
168
169 /* point the trampoline to our ftrace entry point */
170 module_disable_ro(mod);
171 - trampoline[0] = addr;
172 + *mod->arch.ftrace_trampoline = trampoline;
173 module_enable_ro(mod, true);
174
175 /* update trampoline before patching in the branch */
176 smp_wmb();
177 }
178 - addr = (unsigned long)&trampoline[1];
179 + addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
180 #else /* CONFIG_ARM64_MODULE_PLTS */
181 return -EINVAL;
182 #endif /* CONFIG_ARM64_MODULE_PLTS */
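
Note: the ordering in this hunk matters. The trampoline slot is written first, smp_wmb() publishes it, and only then is the branch at the call site patched to point at it, so any CPU that fetches the new branch also sees the filled-in trampoline. A rough user-space analogue using C11 fences (hypothetical simplified types, not the kernel's ftrace plumbing):

#include <stdatomic.h>
#include <stdint.h>

struct plt_entry { uint32_t insn[4]; };

static struct plt_entry trampoline;	/* module's reserved veneer slot */
static _Atomic uint32_t patch_site;	/* branch instruction to rewrite */

static void install(struct plt_entry e, uint32_t branch_insn)
{
	trampoline = e;				/* 1. fill the trampoline     */
	atomic_thread_fence(memory_order_release); /* 2. smp_wmb() analogue */
	atomic_store_explicit(&patch_site, branch_insn,
			      memory_order_relaxed); /* 3. publish the branch */
}

int main(void)
{
	install((struct plt_entry){ .insn = {0} }, 0x14000000 /* b . */);
	return 0;
}
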
183 diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
184 index d05dbe658409..ea640f92fe5a 100644
185 --- a/arch/arm64/kernel/module-plts.c
186 +++ b/arch/arm64/kernel/module-plts.c
187 @@ -11,21 +11,6 @@
188 #include <linux/module.h>
189 #include <linux/sort.h>
190
191 -struct plt_entry {
192 - /*
193 - * A program that conforms to the AArch64 Procedure Call Standard
194 - * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
195 - * IP1 (x17) may be inserted at any branch instruction that is
196 - * exposed to a relocation that supports long branches. Since that
197 - * is exactly what we are dealing with here, we are free to use x16
198 - * as a scratch register in the PLT veneers.
199 - */
200 - __le32 mov0; /* movn x16, #0x.... */
201 - __le32 mov1; /* movk x16, #0x...., lsl #16 */
202 - __le32 mov2; /* movk x16, #0x...., lsl #32 */
203 - __le32 br; /* br x16 */
204 -};
205 -
206 static bool in_init(const struct module *mod, void *loc)
207 {
208 return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
209 @@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
210 int i = pltsec->plt_num_entries;
211 u64 val = sym->st_value + rela->r_addend;
212
213 - /*
214 - * MOVK/MOVN/MOVZ opcode:
215 - * +--------+------------+--------+-----------+-------------+---------+
216 - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
217 - * +--------+------------+--------+-----------+-------------+---------+
218 - *
219 - * Rd := 0x10 (x16)
220 - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
221 - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
222 - * sf := 1 (64-bit variant)
223 - */
224 - plt[i] = (struct plt_entry){
225 - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
226 - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
227 - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
228 - cpu_to_le32(0xd61f0200)
229 - };
230 + plt[i] = get_plt_entry(val);
231
232 /*
233 * Check if the entry we just created is a duplicate. Given that the
234 * relocations are sorted, this will be the last entry we allocated.
235 * (if one exists).
236 */
237 - if (i > 0 &&
238 - plt[i].mov0 == plt[i - 1].mov0 &&
239 - plt[i].mov1 == plt[i - 1].mov1 &&
240 - plt[i].mov2 == plt[i - 1].mov2)
241 + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
242 return (u64)&plt[i - 1];
243
244 pltsec->plt_num_entries++;
245 @@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
246 unsigned long core_plts = 0;
247 unsigned long init_plts = 0;
248 Elf64_Sym *syms = NULL;
249 + Elf_Shdr *tramp = NULL;
250 int i;
251
252 /*
253 @@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
254 mod->arch.core.plt = sechdrs + i;
255 else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
256 mod->arch.init.plt = sechdrs + i;
257 + else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
258 + !strcmp(secstrings + sechdrs[i].sh_name,
259 + ".text.ftrace_trampoline"))
260 + tramp = sechdrs + i;
261 else if (sechdrs[i].sh_type == SHT_SYMTAB)
262 syms = (Elf64_Sym *)sechdrs[i].sh_addr;
263 }
264 @@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
265 mod->arch.init.plt_num_entries = 0;
266 mod->arch.init.plt_max_entries = init_plts;
267
268 + if (tramp) {
269 + tramp->sh_type = SHT_NOBITS;
270 + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
271 + tramp->sh_addralign = __alignof__(struct plt_entry);
272 + tramp->sh_size = sizeof(struct plt_entry);
273 + }
274 +
275 return 0;
276 }
277 diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
278 index f7c9781a9d48..22e36a21c113 100644
279 --- a/arch/arm64/kernel/module.lds
280 +++ b/arch/arm64/kernel/module.lds
281 @@ -1,4 +1,5 @@
282 SECTIONS {
283 .plt (NOLOAD) : { BYTE(0) }
284 .init.plt (NOLOAD) : { BYTE(0) }
285 + .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
286 }
287 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
288 index 8ac0bd2bddb0..3280953a82cf 100644
289 --- a/arch/powerpc/kernel/misc_64.S
290 +++ b/arch/powerpc/kernel/misc_64.S
291 @@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
292 * NOTE, we rely on r0 being 0 from above.
293 */
294 mtspr SPRN_IAMR,r0
295 +BEGIN_FTR_SECTION_NESTED(42)
296 mtspr SPRN_AMOR,r0
297 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
298 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
299
300 /* save regs for local vars on new stack.
301 diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
302 index 3848af167df9..640cf566e986 100644
303 --- a/arch/powerpc/mm/hash_native_64.c
304 +++ b/arch/powerpc/mm/hash_native_64.c
305 @@ -47,7 +47,8 @@
306
307 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
308
309 -static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
310 +static inline unsigned long ___tlbie(unsigned long vpn, int psize,
311 + int apsize, int ssize)
312 {
313 unsigned long va;
314 unsigned int penc;
315 @@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
316 : "memory");
317 break;
318 }
319 - trace_tlbie(0, 0, va, 0, 0, 0, 0);
320 + return va;
321 +}
322 +
323 +static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
324 +{
325 + unsigned long rb;
326 +
327 + rb = ___tlbie(vpn, psize, apsize, ssize);
328 + trace_tlbie(0, 0, rb, 0, 0, 0, 0);
329 }
330
331 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
332 @@ -652,7 +661,7 @@ static void native_hpte_clear(void)
333 if (hpte_v & HPTE_V_VALID) {
334 hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
335 hptep->v = 0;
336 - __tlbie(vpn, psize, apsize, ssize);
337 + ___tlbie(vpn, psize, apsize, ssize);
338 }
339 }
340
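
Note: splitting ___tlbie() out of __tlbie() lets native_hpte_clear(), which runs in contexts where tracepoints are unsafe (the kexec/crash shutdown path), flush without going through trace_tlbie(). The shape of the refactor as a toy (stand-in names and values, not the powerpc internals):

#include <stdio.h>

/* Inner helper: does the work, returns the value the tracepoint wants. */
static unsigned long inner_flush(unsigned long vpn)
{
	return vpn << 12;		/* stand-in for building rb */
}

/* Outer wrapper: same work plus tracing; ordinary callers use this. */
static void traced_flush(unsigned long vpn)
{
	unsigned long rb = inner_flush(vpn);

	printf("trace: rb=%#lx\n", rb);	/* stand-in for trace_tlbie() */
}

int main(void)
{
	traced_flush(0x1234);		/* ordinary path */
	inner_flush(0x1234);		/* trace-unsafe path */
	return 0;
}
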
341 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
342 index 9a3cb3983c01..1a61b1b997f2 100644
343 --- a/arch/s390/include/asm/elf.h
344 +++ b/arch/s390/include/asm/elf.h
345 @@ -194,13 +194,14 @@ struct arch_elf_state {
346 #define CORE_DUMP_USE_REGSET
347 #define ELF_EXEC_PAGESIZE PAGE_SIZE
348
349 -/*
350 - * This is the base location for PIE (ET_DYN with INTERP) loads. On
351 - * 64-bit, this is raised to 4GB to leave the entire 32-bit address
352 - * space open for things that want to use the area for 32-bit pointers.
353 - */
354 -#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
355 - 0x100000000UL)
356 +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
357 + use of this is to invoke "./ld.so someprog" to test out a new version of
358 + the loader. We need to make sure that it is out of the way of the program
359 + that it will "exec", and that there is sufficient room for the brk. 64-bit
360 + tasks are aligned to 4GB. */
361 +#define ELF_ET_DYN_BASE (is_compat_task() ? \
362 + (STACK_TOP / 3 * 2) : \
363 + (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
364
365 /* This yields a mask that user programs can use to figure out what
366 instruction set this CPU supports. */
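
Note: the new s390 ELF_ET_DYN_BASE follows the generic formula of two thirds of STACK_TOP, with the 64-bit case additionally rounded down to a 4GB boundary. A quick check of the arithmetic (the STACK_TOP value here is made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long stack_top = 1UL << 44;	/* hypothetical STACK_TOP */
	unsigned long compat = stack_top / 3 * 2;
	unsigned long base64 = (stack_top / 3 * 2) & ~((1UL << 32) - 1);

	printf("compat base: %#lx\n", compat);
	printf("64-bit base: %#lx (4GB-aligned)\n", base64);
	return 0;
}
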
367 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
368 index 518d9286b3d1..2e956afe272c 100644
369 --- a/arch/x86/entry/entry_64.S
370 +++ b/arch/x86/entry/entry_64.S
371 @@ -51,19 +51,15 @@ ENTRY(native_usergs_sysret64)
372 END(native_usergs_sysret64)
373 #endif /* CONFIG_PARAVIRT */
374
375 -.macro TRACE_IRQS_FLAGS flags:req
376 +.macro TRACE_IRQS_IRETQ
377 #ifdef CONFIG_TRACE_IRQFLAGS
378 - bt $9, \flags /* interrupts off? */
379 + bt $9, EFLAGS(%rsp) /* interrupts off? */
380 jnc 1f
381 TRACE_IRQS_ON
382 1:
383 #endif
384 .endm
385
386 -.macro TRACE_IRQS_IRETQ
387 - TRACE_IRQS_FLAGS EFLAGS(%rsp)
388 -.endm
389 -
390 /*
391 * When dynamic function tracer is enabled it will add a breakpoint
392 * to all locations that it is about to modify, sync CPUs, update
393 @@ -927,13 +923,11 @@ ENTRY(native_load_gs_index)
394 FRAME_BEGIN
395 pushfq
396 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
397 - TRACE_IRQS_OFF
398 SWAPGS
399 .Lgs_change:
400 movl %edi, %gs
401 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
402 SWAPGS
403 - TRACE_IRQS_FLAGS (%rsp)
404 popfq
405 FRAME_END
406 ret
407 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
408 index f735c3016325..f02de8bc1f72 100644
409 --- a/arch/x86/include/asm/pgtable.h
410 +++ b/arch/x86/include/asm/pgtable.h
411 @@ -1093,6 +1093,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
412 clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
413 }
414
415 +#define pud_write pud_write
416 +static inline int pud_write(pud_t pud)
417 +{
418 + return pud_flags(pud) & _PAGE_RW;
419 +}
420 +
421 /*
422 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
423 *
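
Note: defining pud_write() lets x86 answer write-permission queries for 1GB mappings (used, for example, when get_user_pages() walks huge PUDs) with its own R/W bit test, presumably instead of the generic fallback. A toy mirror of the check:

#include <stdio.h>

#define _PAGE_RW (1UL << 1)		/* x86 R/W page-table bit */

/* Toy version of the new pud_write(): test the R/W bit of the entry. */
static int pud_write(unsigned long pud_val)
{
	return !!(pud_val & _PAGE_RW);
}

int main(void)
{
	printf("%d %d\n", pud_write(_PAGE_RW), pud_write(0));	/* 1 0 */
	return 0;
}
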
424 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
425 index 36c90d631096..ef03efba1c23 100644
426 --- a/arch/x86/kvm/lapic.c
427 +++ b/arch/x86/kvm/lapic.c
428 @@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
429 recalculate_apic_map(apic->vcpu->kvm);
430 }
431
432 +static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
433 +{
434 + return ((id >> 4) << 16) | (1 << (id & 0xf));
435 +}
436 +
437 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
438 {
439 - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
440 + u32 ldr = kvm_apic_calc_x2apic_ldr(id);
441
442 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
443
444 @@ -2196,6 +2201,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
445 {
446 if (apic_x2apic_mode(vcpu->arch.apic)) {
447 u32 *id = (u32 *)(s->regs + APIC_ID);
448 + u32 *ldr = (u32 *)(s->regs + APIC_LDR);
449
450 if (vcpu->kvm->arch.x2apic_format) {
451 if (*id != vcpu->vcpu_id)
452 @@ -2206,6 +2212,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
453 else
454 *id <<= 24;
455 }
456 +
457 + /* In x2APIC mode, the LDR is fixed and based on the id */
458 + if (set)
459 + *ldr = kvm_apic_calc_x2apic_ldr(*id);
460 }
461
462 return 0;
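
Note: factoring out kvm_apic_calc_x2apic_ldr() makes the x2APIC logical-ID derivation reusable by the state-restore fixup above: the cluster number (id >> 4) lands in bits 31:16 and a one-hot bit for the position within the cluster in the low word. Worked values:

#include <stdio.h>
#include <stdint.h>

/* Mirror of kvm_apic_calc_x2apic_ldr(). */
static uint32_t calc_x2apic_ldr(uint32_t id)
{
	return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
	printf("%#x\n", calc_x2apic_ldr(5));	/* 0x20: cluster 0, bit 5 */
	printf("%#x\n", calc_x2apic_ldr(35));	/* 0x20008: cluster 2, bit 3 */
	return 0;
}
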
463 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
464 index ca209a4a7834..17fb6c6d939a 100644
465 --- a/arch/x86/kvm/svm.c
466 +++ b/arch/x86/kvm/svm.c
467 @@ -2189,6 +2189,8 @@ static int ud_interception(struct vcpu_svm *svm)
468 int er;
469
470 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
471 + if (er == EMULATE_USER_EXIT)
472 + return 0;
473 if (er != EMULATE_DONE)
474 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
475 return 1;
476 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
477 index 21cad7068cbf..b21113bcf227 100644
478 --- a/arch/x86/kvm/vmx.c
479 +++ b/arch/x86/kvm/vmx.c
480 @@ -5914,6 +5914,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
481 return 1;
482 }
483 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
484 + if (er == EMULATE_USER_EXIT)
485 + return 0;
486 if (er != EMULATE_DONE)
487 kvm_queue_exception(vcpu, UD_VECTOR);
488 return 1;
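
Note: both #UD handlers (SVM above and VMX here) get the same fix. In KVM's exit-handler convention, returning 1 resumes the guest while returning 0 exits to userspace, so EMULATE_USER_EXIT must map to 0 rather than re-entering the guest. A toy mirror of the control flow (not the real KVM signatures):

#include <stdio.h>

enum emulate_result { EMULATE_DONE, EMULATE_USER_EXIT, EMULATE_FAIL };

static void queue_ud(void)
{
	/* stand-in for kvm_queue_exception(vcpu, UD_VECTOR) */
}

static int ud_interception(enum emulate_result er)
{
	if (er == EMULATE_USER_EXIT)
		return 0;		/* 0: exit to userspace         */
	if (er != EMULATE_DONE)
		queue_ud();		/* re-inject #UD into the guest */
	return 1;			/* 1: resume guest execution    */
}

int main(void)
{
	printf("%d\n", ud_interception(EMULATE_USER_EXIT));	/* 0 */
	return 0;
}
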
489 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
490 index 03869eb7fcd6..4195cbcdb310 100644
491 --- a/arch/x86/kvm/x86.c
492 +++ b/arch/x86/kvm/x86.c
493 @@ -1830,6 +1830,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
494 */
495 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
496
497 + if (guest_hv_clock.version & 1)
498 + ++guest_hv_clock.version; /* first time write, random junk */
499 +
500 vcpu->hv_clock.version = guest_hv_clock.version + 1;
501 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
502 &vcpu->hv_clock,
503 @@ -5705,6 +5708,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
504 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
505 emulation_type))
506 return EMULATE_DONE;
507 + if (ctxt->have_exception && inject_emulated_exception(vcpu))
508 + return EMULATE_DONE;
509 if (emulation_type & EMULTYPE_SKIP)
510 return EMULATE_FAIL;
511 return handle_emulation_failure(vcpu);
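
Note: the pvclock hunk above guards a seqlock-style protocol in which an even version means the time data is stable and an odd one means an update is in flight. If the guest page starts out with random odd junk, adding 1 would publish an even "stable" value while the host is mid-update; normalizing to even first keeps the in-flight value odd. Sketch:

#include <stdio.h>
#include <stdint.h>

/* Toy version handling: the in-flight value must always be odd. */
static uint32_t start_update(uint32_t guest_version)
{
	if (guest_version & 1)		/* first-time write: random junk */
		++guest_version;	/* normalize to an even baseline */
	return guest_version + 1;	/* odd: update in progress       */
}

int main(void)
{
	printf("%u %u\n", start_update(8) & 1, start_update(7) & 1); /* 1 1 */
	return 0;
}
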
512 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
513 index 337cf382718e..a72659f452a5 100644
514 --- a/crypto/af_alg.c
515 +++ b/crypto/af_alg.c
516 @@ -1047,6 +1047,18 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
517 }
518 EXPORT_SYMBOL_GPL(af_alg_sendpage);
519
520 +/**
521 + * af_alg_free_resources - release resources required for crypto request
522 + */
523 +void af_alg_free_resources(struct af_alg_async_req *areq)
524 +{
525 + struct sock *sk = areq->sk;
526 +
527 + af_alg_free_areq_sgls(areq);
528 + sock_kfree_s(sk, areq, areq->areqlen);
529 +}
530 +EXPORT_SYMBOL_GPL(af_alg_free_resources);
531 +
532 /**
533 * af_alg_async_cb - AIO callback handler
534 *
535 @@ -1063,18 +1075,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
536 struct kiocb *iocb = areq->iocb;
537 unsigned int resultlen;
538
539 - lock_sock(sk);
540 -
541 /* Buffer size written by crypto operation. */
542 resultlen = areq->outlen;
543
544 - af_alg_free_areq_sgls(areq);
545 - sock_kfree_s(sk, areq, areq->areqlen);
546 - __sock_put(sk);
547 + af_alg_free_resources(areq);
548 + sock_put(sk);
549
550 iocb->ki_complete(iocb, err ? err : resultlen, 0);
551 -
552 - release_sock(sk);
553 }
554 EXPORT_SYMBOL_GPL(af_alg_async_cb);
555
556 diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
557 index 516b38c3a169..d0b45145cb30 100644
558 --- a/crypto/algif_aead.c
559 +++ b/crypto/algif_aead.c
560 @@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
561 struct aead_tfm *aeadc = pask->private;
562 struct crypto_aead *tfm = aeadc->aead;
563 struct crypto_skcipher *null_tfm = aeadc->null_tfm;
564 - unsigned int as = crypto_aead_authsize(tfm);
565 + unsigned int i, as = crypto_aead_authsize(tfm);
566 struct af_alg_async_req *areq;
567 - struct af_alg_tsgl *tsgl;
568 - struct scatterlist *src;
569 + struct af_alg_tsgl *tsgl, *tmp;
570 + struct scatterlist *rsgl_src, *tsgl_src = NULL;
571 int err = 0;
572 size_t used = 0; /* [in] TX bufs to be en/decrypted */
573 size_t outlen = 0; /* [out] RX bufs produced by kernel */
574 @@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
575 }
576
577 processed = used + ctx->aead_assoclen;
578 - tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
579 + list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
580 + for (i = 0; i < tsgl->cur; i++) {
581 + struct scatterlist *process_sg = tsgl->sg + i;
582 +
583 + if (!(process_sg->length) || !sg_page(process_sg))
584 + continue;
585 + tsgl_src = process_sg;
586 + break;
587 + }
588 + if (tsgl_src)
589 + break;
590 + }
591 + if (processed && !tsgl_src) {
592 + err = -EFAULT;
593 + goto free;
594 + }
595
596 /*
597 * Copy of AAD from source to destination
598 @@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
599 */
600
601 /* Use the RX SGL as source (and destination) for crypto op. */
602 - src = areq->first_rsgl.sgl.sg;
603 + rsgl_src = areq->first_rsgl.sgl.sg;
604
605 if (ctx->enc) {
606 /*
607 @@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
608 * v v
609 * RX SGL: AAD || PT || Tag
610 */
611 - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
612 + err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
613 areq->first_rsgl.sgl.sg, processed);
614 if (err)
615 goto free;
616 @@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
617 */
618
619 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
620 - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
621 + err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
622 areq->first_rsgl.sgl.sg, outlen);
623 if (err)
624 goto free;
625 @@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
626 areq->tsgl);
627 } else
628 /* no RX SGL present (e.g. authentication only) */
629 - src = areq->tsgl;
630 + rsgl_src = areq->tsgl;
631 }
632
633 /* Initialize the crypto operation */
634 - aead_request_set_crypt(&areq->cra_u.aead_req, src,
635 + aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
636 areq->first_rsgl.sgl.sg, used, ctx->iv);
637 aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
638 aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
639
640 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
641 /* AIO operation */
642 + sock_hold(sk);
643 areq->iocb = msg->msg_iocb;
644 aead_request_set_callback(&areq->cra_u.aead_req,
645 CRYPTO_TFM_REQ_MAY_BACKLOG,
646 af_alg_async_cb, areq);
647 err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
648 crypto_aead_decrypt(&areq->cra_u.aead_req);
649 +
650 + /* AIO operation in progress */
651 + if (err == -EINPROGRESS || err == -EBUSY) {
652 + /* Remember output size that will be generated. */
653 + areq->outlen = outlen;
654 +
655 + return -EIOCBQUEUED;
656 + }
657 +
658 + sock_put(sk);
659 } else {
660 /* Synchronous operation */
661 aead_request_set_callback(&areq->cra_u.aead_req,
662 @@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
663 &ctx->completion);
664 }
665
666 - /* AIO operation in progress */
667 - if (err == -EINPROGRESS) {
668 - sock_hold(sk);
669 -
670 - /* Remember output size that will be generated. */
671 - areq->outlen = outlen;
672 -
673 - return -EIOCBQUEUED;
674 - }
675
676 free:
677 - af_alg_free_areq_sgls(areq);
678 - sock_kfree_s(sk, areq, areq->areqlen);
679 + af_alg_free_resources(areq);
680
681 return err ? err : outlen;
682 }
683 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
684 index 8ae4170aaeb4..30ee2a8e8f42 100644
685 --- a/crypto/algif_skcipher.c
686 +++ b/crypto/algif_skcipher.c
687 @@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
688
689 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
690 /* AIO operation */
691 + sock_hold(sk);
692 areq->iocb = msg->msg_iocb;
693 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
694 CRYPTO_TFM_REQ_MAY_SLEEP,
695 @@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
696 err = ctx->enc ?
697 crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
698 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
699 +
700 + /* AIO operation in progress */
701 + if (err == -EINPROGRESS || err == -EBUSY) {
702 + /* Remember output size that will be generated. */
703 + areq->outlen = len;
704 +
705 + return -EIOCBQUEUED;
706 + }
707 +
708 + sock_put(sk);
709 } else {
710 /* Synchronous operation */
711 skcipher_request_set_callback(&areq->cra_u.skcipher_req,
712 @@ -137,19 +148,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
713 &ctx->completion);
714 }
715
716 - /* AIO operation in progress */
717 - if (err == -EINPROGRESS) {
718 - sock_hold(sk);
719 -
720 - /* Remember output size that will be generated. */
721 - areq->outlen = len;
722 -
723 - return -EIOCBQUEUED;
724 - }
725
726 free:
727 - af_alg_free_areq_sgls(areq);
728 - sock_kfree_s(sk, areq, areq->areqlen);
729 + af_alg_free_resources(areq);
730
731 return err ? err : len;
732 }
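
Note: the three crypto hunks (af_alg.c, algif_aead.c, algif_skcipher.c) converge on one lifetime rule: take a socket reference before submitting an async request, then drop it either in the completion callback, which also no longer takes the socket lock, or immediately on the synchronous path. A toy refcount walk-through (hypothetical names, not the kernel socket API):

#include <stdio.h>

static int refs = 1;			/* socket starts with one ref */
static void sock_hold(void) { refs++; }
static void sock_put(void)  { refs--; }

static int submit(int went_async)
{
	sock_hold();			/* taken before the submit     */
	if (went_async)
		return -1;		/* -EIOCBQUEUED: cb will put() */
	sock_put();			/* sync path: put immediately  */
	return 0;
}

static void completion_cb(void)
{
	sock_put();			/* balances the submit's hold  */
}

int main(void)
{
	submit(1);
	completion_cb();
	printf("refs=%d\n", refs);	/* 1: balanced */
	return 0;
}
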
733 diff --git a/crypto/skcipher.c b/crypto/skcipher.c
734 index d5692e35fab1..778e0ff42bfa 100644
735 --- a/crypto/skcipher.c
736 +++ b/crypto/skcipher.c
737 @@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
738 scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
739 scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
740
741 + scatterwalk_done(&walk->in, 0, walk->total);
742 + scatterwalk_done(&walk->out, 0, walk->total);
743 +
744 walk->iv = req->iv;
745 walk->oiv = req->iv;
746
747 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
748 index 82b3ce5e937e..df842465634a 100644
749 --- a/drivers/acpi/ec.c
750 +++ b/drivers/acpi/ec.c
751 @@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
752 {
753 struct acpi_ec *ec = NULL;
754 int ret;
755 + bool is_ecdt = false;
756 + acpi_status status;
757
758 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
759 strcpy(acpi_device_class(device), ACPI_EC_CLASS);
760
761 - ec = acpi_ec_alloc();
762 - if (!ec)
763 - return -ENOMEM;
764 - if (ec_parse_device(device->handle, 0, ec, NULL) !=
765 - AE_CTRL_TERMINATE) {
766 + if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
767 + is_ecdt = true;
768 + ec = boot_ec;
769 + } else {
770 + ec = acpi_ec_alloc();
771 + if (!ec)
772 + return -ENOMEM;
773 + status = ec_parse_device(device->handle, 0, ec, NULL);
774 + if (status != AE_CTRL_TERMINATE) {
775 ret = -EINVAL;
776 goto err_alloc;
777 + }
778 }
779
780 if (acpi_is_boot_ec(ec)) {
781 - boot_ec_is_ecdt = false;
782 - /*
783 - * Trust PNP0C09 namespace location rather than ECDT ID.
784 - *
785 - * But trust ECDT GPE rather than _GPE because of ASUS quirks,
786 - * so do not change boot_ec->gpe to ec->gpe.
787 - */
788 - boot_ec->handle = ec->handle;
789 - acpi_handle_debug(ec->handle, "duplicated.\n");
790 - acpi_ec_free(ec);
791 - ec = boot_ec;
792 - ret = acpi_config_boot_ec(ec, ec->handle, true, false);
793 + boot_ec_is_ecdt = is_ecdt;
794 + if (!is_ecdt) {
795 + /*
796 + * Trust PNP0C09 namespace location rather than
797 + * ECDT ID. But trust ECDT GPE rather than _GPE
798 + * because of ASUS quirks, so do not change
799 + * boot_ec->gpe to ec->gpe.
800 + */
801 + boot_ec->handle = ec->handle;
802 + acpi_handle_debug(ec->handle, "duplicated.\n");
803 + acpi_ec_free(ec);
804 + ec = boot_ec;
805 + }
806 + ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
807 } else
808 ret = acpi_ec_setup(ec, true);
809 if (ret)
810 @@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
811 ret = !!request_region(ec->command_addr, 1, "EC cmd");
812 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
813
814 - /* Reprobe devices depending on the EC */
815 - acpi_walk_dep_device_list(ec->handle);
816 + if (!is_ecdt) {
817 + /* Reprobe devices depending on the EC */
818 + acpi_walk_dep_device_list(ec->handle);
819 + }
820 acpi_handle_debug(ec->handle, "enumerated.\n");
821 return 0;
822
823 @@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
824
825 static const struct acpi_device_id ec_device_ids[] = {
826 {"PNP0C09", 0},
827 + {ACPI_ECDT_HID, 0},
828 {"", 0},
829 };
830
831 @@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
832 * Note: ec->handle can be valid if this function is called after
833 * acpi_ec_add(), hence the fast path.
834 */
835 - if (boot_ec->handle != ACPI_ROOT_OBJECT)
836 - handle = boot_ec->handle;
837 - else if (!acpi_ec_ecdt_get_handle(&handle))
838 - return -ENODEV;
839 - return acpi_config_boot_ec(boot_ec, handle, true, true);
840 + if (boot_ec->handle == ACPI_ROOT_OBJECT) {
841 + if (!acpi_ec_ecdt_get_handle(&handle))
842 + return -ENODEV;
843 + boot_ec->handle = handle;
844 + }
845 +
846 + /* Register to ACPI bus with PM ops attached */
847 + return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
848 }
849
850 #if 0
851 @@ -2020,6 +2035,12 @@ int __init acpi_ec_init(void)
852
853 /* Drivers must be started after acpi_ec_query_init() */
854 dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
855 + /*
856 + * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
857 + * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
858 + * settings but invalid DSDT settings.
859 + * https://bugzilla.kernel.org/show_bug.cgi?id=196847
860 + */
861 ecdt_fail = acpi_ec_ecdt_start();
862 return ecdt_fail && dsdt_fail ? -ENODEV : 0;
863 }
864 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
865 index 4361c4415b4f..ede83d38beed 100644
866 --- a/drivers/acpi/internal.h
867 +++ b/drivers/acpi/internal.h
868 @@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
869 bool acpi_device_is_battery(struct acpi_device *adev);
870 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
871 const struct device *dev);
872 +int acpi_bus_register_early_device(int type);
873
874 /* --------------------------------------------------------------------------
875 Device Matching and Notification
876 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
877 index 602f8ff212f2..2f2f50322ffb 100644
878 --- a/drivers/acpi/scan.c
879 +++ b/drivers/acpi/scan.c
880 @@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
881 case ACPI_BUS_TYPE_SLEEP_BUTTON:
882 strcpy(device->pnp.bus_id, "SLPF");
883 break;
884 + case ACPI_BUS_TYPE_ECDT_EC:
885 + strcpy(device->pnp.bus_id, "ECDT");
886 + break;
887 default:
888 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
889 /* Clean up trailing underscores (if any) */
890 @@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
891 case ACPI_BUS_TYPE_SLEEP_BUTTON:
892 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
893 break;
894 + case ACPI_BUS_TYPE_ECDT_EC:
895 + acpi_add_id(pnp, ACPI_ECDT_HID);
896 + break;
897 }
898 }
899
900 @@ -2049,6 +2055,21 @@ void acpi_bus_trim(struct acpi_device *adev)
901 }
902 EXPORT_SYMBOL_GPL(acpi_bus_trim);
903
904 +int acpi_bus_register_early_device(int type)
905 +{
906 + struct acpi_device *device = NULL;
907 + int result;
908 +
909 + result = acpi_add_single_object(&device, NULL,
910 + type, ACPI_STA_DEFAULT);
911 + if (result)
912 + return result;
913 +
914 + device->flags.match_driver = true;
915 + return device_attach(&device->dev);
916 +}
917 +EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
918 +
919 static int acpi_bus_scan_fixed(void)
920 {
921 int result = 0;
922 diff --git a/drivers/dax/device.c b/drivers/dax/device.c
923 index e9f3b3e4bbf4..375b99bca002 100644
924 --- a/drivers/dax/device.c
925 +++ b/drivers/dax/device.c
926 @@ -427,9 +427,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
927 return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
928 }
929
930 +static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
931 +{
932 + struct file *filp = vma->vm_file;
933 + struct dev_dax *dev_dax = filp->private_data;
934 + struct dax_region *dax_region = dev_dax->region;
935 +
936 + if (!IS_ALIGNED(addr, dax_region->align))
937 + return -EINVAL;
938 + return 0;
939 +}
940 +
941 static const struct vm_operations_struct dax_vm_ops = {
942 .fault = dev_dax_fault,
943 .huge_fault = dev_dax_huge_fault,
944 + .split = dev_dax_split,
945 };
946
947 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
948 diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
949 index dec3a815455d..b44d9d7db347 100644
950 --- a/drivers/dma-buf/reservation.c
951 +++ b/drivers/dma-buf/reservation.c
952 @@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
953 * @dst: the destination reservation object
954 * @src: the source reservation object
955 *
956 -* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
957 -* held.
958 +* Copy all fences from src to dst. dst-lock must be held.
959 */
960 int reservation_object_copy_fences(struct reservation_object *dst,
961 struct reservation_object *src)
962 @@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
963 size_t size;
964 unsigned i;
965
966 - src_list = reservation_object_get_list(src);
967 + rcu_read_lock();
968 + src_list = rcu_dereference(src->fence);
969
970 +retry:
971 if (src_list) {
972 - size = offsetof(typeof(*src_list),
973 - shared[src_list->shared_count]);
974 + unsigned shared_count = src_list->shared_count;
975 +
976 + size = offsetof(typeof(*src_list), shared[shared_count]);
977 + rcu_read_unlock();
978 +
979 dst_list = kmalloc(size, GFP_KERNEL);
980 if (!dst_list)
981 return -ENOMEM;
982
983 - dst_list->shared_count = src_list->shared_count;
984 - dst_list->shared_max = src_list->shared_count;
985 - for (i = 0; i < src_list->shared_count; ++i)
986 - dst_list->shared[i] =
987 - dma_fence_get(src_list->shared[i]);
988 + rcu_read_lock();
989 + src_list = rcu_dereference(src->fence);
990 + if (!src_list || src_list->shared_count > shared_count) {
991 + kfree(dst_list);
992 + goto retry;
993 + }
994 +
995 + dst_list->shared_count = 0;
996 + dst_list->shared_max = shared_count;
997 + for (i = 0; i < src_list->shared_count; ++i) {
998 + struct dma_fence *fence;
999 +
1000 + fence = rcu_dereference(src_list->shared[i]);
1001 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1002 + &fence->flags))
1003 + continue;
1004 +
1005 + if (!dma_fence_get_rcu(fence)) {
1006 + kfree(dst_list);
1007 + src_list = rcu_dereference(src->fence);
1008 + goto retry;
1009 + }
1010 +
1011 + if (dma_fence_is_signaled(fence)) {
1012 + dma_fence_put(fence);
1013 + continue;
1014 + }
1015 +
1016 + dst_list->shared[dst_list->shared_count++] = fence;
1017 + }
1018 } else {
1019 dst_list = NULL;
1020 }
1021
1022 + new = dma_fence_get_rcu_safe(&src->fence_excl);
1023 + rcu_read_unlock();
1024 +
1025 kfree(dst->staged);
1026 dst->staged = NULL;
1027
1028 src_list = reservation_object_get_list(dst);
1029 -
1030 old = reservation_object_get_excl(dst);
1031 - new = reservation_object_get_excl(src);
1032 -
1033 - dma_fence_get(new);
1034
1035 preempt_disable();
1036 write_seqcount_begin(&dst->seq);
1037 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1038 index 103635ab784c..87801faaf264 100644
1039 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1040 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1041 @@ -1536,18 +1536,14 @@ struct amdgpu_device {
1042 /* sdma */
1043 struct amdgpu_sdma sdma;
1044
1045 - union {
1046 - struct {
1047 - /* uvd */
1048 - struct amdgpu_uvd uvd;
1049 -
1050 - /* vce */
1051 - struct amdgpu_vce vce;
1052 - };
1053 -
1054 - /* vcn */
1055 - struct amdgpu_vcn vcn;
1056 - };
1057 + /* uvd */
1058 + struct amdgpu_uvd uvd;
1059 +
1060 + /* vce */
1061 + struct amdgpu_vce vce;
1062 +
1063 + /* vcn */
1064 + struct amdgpu_vcn vcn;
1065
1066 /* firmwares */
1067 struct amdgpu_firmware firmware;
1068 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1069 index ce443586a0c7..cc4e18dcd8b6 100644
1070 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1071 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
1072 @@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
1073 return true;
1074 }
1075
1076 -/* Atom needs data in little endian format
1077 - * so swap as appropriate when copying data to
1078 - * or from atom. Note that atom operates on
1079 - * dw units.
1080 +/* Atom needs data in little endian format so swap as appropriate when copying
1081 + * data to or from atom. Note that atom operates on dw units.
1082 + *
1083 + * Use to_le=true when sending data to atom and provide at least
1084 + * ALIGN(num_bytes,4) bytes in the dst buffer.
1085 + *
1086 + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
1087 + * bytes in the src buffer.
1088 */
1089 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
1090 {
1091 #ifdef __BIG_ENDIAN
1092 - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
1093 - u32 *dst32, *src32;
1094 + u32 src_tmp[5], dst_tmp[5];
1095 int i;
1096 + u8 align_num_bytes = ALIGN(num_bytes, 4);
1097
1098 - memcpy(src_tmp, src, num_bytes);
1099 - src32 = (u32 *)src_tmp;
1100 - dst32 = (u32 *)dst_tmp;
1101 if (to_le) {
1102 - for (i = 0; i < ((num_bytes + 3) / 4); i++)
1103 - dst32[i] = cpu_to_le32(src32[i]);
1104 - memcpy(dst, dst_tmp, num_bytes);
1105 + memcpy(src_tmp, src, num_bytes);
1106 + for (i = 0; i < align_num_bytes / 4; i++)
1107 + dst_tmp[i] = cpu_to_le32(src_tmp[i]);
1108 + memcpy(dst, dst_tmp, align_num_bytes);
1109 } else {
1110 - u8 dws = num_bytes & ~3;
1111 - for (i = 0; i < ((num_bytes + 3) / 4); i++)
1112 - dst32[i] = le32_to_cpu(src32[i]);
1113 - memcpy(dst, dst_tmp, dws);
1114 - if (num_bytes % 4) {
1115 - for (i = 0; i < (num_bytes % 4); i++)
1116 - dst[dws+i] = dst_tmp[dws+i];
1117 - }
1118 + memcpy(src_tmp, src, align_num_bytes);
1119 + for (i = 0; i < align_num_bytes / 4; i++)
1120 + dst_tmp[i] = le32_to_cpu(src_tmp[i]);
1121 + memcpy(dst, dst_tmp, num_bytes);
1122 }
1123 #else
1124 memcpy(dst, src, num_bytes);
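
Note: the rewritten big-endian path always rounds the dword loop up with ALIGN(num_bytes, 4) and copies only num_bytes back out, replacing the old tail-byte special case; the comment's caveat that callers must supply ALIGN(num_bytes, 4) accessible bytes is the price of that. A little-endian-safe sketch of the buffer handling (toy data, byteswap elided):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	uint8_t src[8] = {1, 2, 3, 4, 5, 6};	/* 6 payload bytes, padded */
	uint8_t dst[8] = {0};
	uint32_t tmp[2];
	uint8_t n = 6, aligned = ALIGN(n, 4);	/* 8 */

	memcpy(tmp, src, aligned);	/* caller guarantees ALIGN(n,4) bytes */
	/* per-dword cpu_to_le32()/le32_to_cpu() would run here on BE hosts */
	memcpy(dst, tmp, n);		/* only num_bytes leave the helper */
	printf("aligned=%u dst[5]=%u\n", aligned, dst[5]);	/* 8 6 */
	return 0;
}
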
1125 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1126 index c21adf60a7f2..057e1ecd83ce 100644
1127 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1128 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
1129 @@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
1130 return false;
1131 }
1132
1133 - tmp = bios[0x18] | (bios[0x19] << 8);
1134 - if (bios[tmp + 0x14] != 0x0) {
1135 - DRM_INFO("Not an x86 BIOS ROM\n");
1136 - return false;
1137 - }
1138 -
1139 bios_header_start = bios[0x48] | (bios[0x49] << 8);
1140 if (!bios_header_start) {
1141 DRM_INFO("Can't locate bios header\n");
1142 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1143 index 9e495da0bb03..ffe483980362 100644
1144 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1145 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1146 @@ -391,6 +391,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
1147 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
1148 &bo->placement, page_align, !kernel, NULL,
1149 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
1150 + if (unlikely(r != 0))
1151 + return r;
1152 +
1153 bytes_moved = atomic64_read(&adev->num_bytes_moved) -
1154 initial_bytes_moved;
1155 if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
1156 @@ -400,9 +403,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
1157 else
1158 amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
1159
1160 - if (unlikely(r != 0))
1161 - return r;
1162 -
1163 if (kernel)
1164 bo->tbo.priority = 1;
1165
1166 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1167 index c855366521ab..9fc3d387eae3 100644
1168 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1169 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1170 @@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
1171 uint32_t allocated = 0;
1172 uint32_t tmp, handle = 0;
1173 uint32_t *size = &tmp;
1174 - int i, r, idx = 0;
1175 + int i, r = 0, idx = 0;
1176
1177 p->job->vm = NULL;
1178 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1179 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1180 index bd20ff018512..863c6dd0123a 100644
1181 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1182 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1183 @@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
1184 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1185 struct amdgpu_vm *vm)
1186 {
1187 - int r;
1188 + int r = 0;
1189
1190 r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
1191 if (r)
1192 @@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1193 {
1194 struct amdgpu_bo_va_mapping *mapping, *tmp;
1195 bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
1196 - int i;
1197 + struct amdgpu_bo *root;
1198 + int i, r;
1199
1200 amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1201
1202 @@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1203 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
1204 }
1205
1206 - amdgpu_vm_free_levels(&vm->root);
1207 + root = amdgpu_bo_ref(vm->root.bo);
1208 + r = amdgpu_bo_reserve(root, true);
1209 + if (r) {
1210 + dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
1211 + } else {
1212 + amdgpu_vm_free_levels(&vm->root);
1213 + amdgpu_bo_unreserve(root);
1214 + }
1215 + amdgpu_bo_unref(&root);
1216 dma_fence_put(vm->last_dir_update);
1217 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
1218 amdgpu_vm_free_reserved_vmid(adev, vm, i);
1219 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1220 index d04d0b123212..6dc0f6e346e7 100644
1221 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1222 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
1223 @@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle)
1224 static int gmc_v9_0_late_init(void *handle)
1225 {
1226 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1227 - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
1228 + /*
1229 + * The latest engine allocation on gfx9 is:
1230 + * Engine 0, 1: idle
1231 + * Engine 2, 3: firmware
1232 + * Engine 4~13: amdgpu ring, subject to change when ring number changes
1233 + * Engine 14~15: idle
1234 + * Engine 16: kfd tlb invalidation
1235 + * Engine 17: Gart flushes
1236 + */
1237 + unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
1238 unsigned i;
1239
1240 for(i = 0; i < adev->num_rings; ++i) {
1241 @@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle)
1242 ring->funcs->vmhub);
1243 }
1244
1245 - /* Engine 17 is used for GART flushes */
1246 + /* Engine 16 is used for KFD and 17 for GART flushes */
1247 for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
1248 - BUG_ON(vm_inv_eng[i] > 17);
1249 + BUG_ON(vm_inv_eng[i] > 16);
1250
1251 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
1252 }
1253 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1254 index f2c3a49f73a0..3e59c766722c 100644
1255 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1256 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1257 @@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
1258 }
1259 static u32 soc15_get_xclk(struct amdgpu_device *adev)
1260 {
1261 - if (adev->asic_type == CHIP_VEGA10)
1262 - return adev->clock.spll.reference_freq/4;
1263 - else
1264 - return adev->clock.spll.reference_freq;
1265 + return adev->clock.spll.reference_freq;
1266 }
1267
1268
1269 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1270 index 21e7b88401e1..a098712bdd2f 100644
1271 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1272 +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
1273 @@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
1274
1275 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
1276 {
1277 - adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
1278 + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
1279 adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
1280 }
1281
1282 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1283 index 84f01fd33aff..b50aa292d026 100644
1284 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1285 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
1286 @@ -850,9 +850,9 @@ static int init_over_drive_limits(
1287 const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
1288 {
1289 hwmgr->platform_descriptor.overdriveLimit.engineClock =
1290 - le16_to_cpu(powerplay_table->ulMaxODEngineClock);
1291 + le32_to_cpu(powerplay_table->ulMaxODEngineClock);
1292 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1293 - le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
1294 + le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
1295
1296 hwmgr->platform_descriptor.minOverdriveVDDC = 0;
1297 hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
1298 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1299 index 6bb6337be920..fc7946eb6665 100644
1300 --- a/drivers/gpu/drm/drm_edid.c
1301 +++ b/drivers/gpu/drm/drm_edid.c
1302 @@ -4809,7 +4809,8 @@ void
1303 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
1304 const struct drm_display_mode *mode,
1305 enum hdmi_quantization_range rgb_quant_range,
1306 - bool rgb_quant_range_selectable)
1307 + bool rgb_quant_range_selectable,
1308 + bool is_hdmi2_sink)
1309 {
1310 /*
1311 * CEA-861:
1312 @@ -4833,8 +4834,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
1313 * YQ-field to match the RGB Quantization Range being transmitted
1314 * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
1315 * set YQ=1) and the Sink shall ignore the YQ-field."
1316 + *
1317 + * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
1318 + * by non-zero YQ when receiving RGB. There doesn't seem to be any
1319 + * good way to tell which version of CEA-861 the sink supports, so
1320 + * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
1321 + * on CEA-861-F.
1322 */
1323 - if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
1324 + if (!is_hdmi2_sink ||
1325 + rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
1326 frame->ycc_quantization_range =
1327 HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
1328 else
1329 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1330 index 1b8f013ffa65..5e93589c335c 100644
1331 --- a/drivers/gpu/drm/drm_fb_helper.c
1332 +++ b/drivers/gpu/drm/drm_fb_helper.c
1333 @@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1334
1335 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
1336 DRM_INFO("Cannot find any crtc or sizes\n");
1337 +
1338 + /* First time: disable all crtc's.. */
1339 + if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
1340 + restore_fbdev_mode(fb_helper);
1341 return -EAGAIN;
1342 }
1343
1344 diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
1345 index 70f2b9593edc..17e8ef9a1c11 100644
1346 --- a/drivers/gpu/drm/drm_vblank.c
1347 +++ b/drivers/gpu/drm/drm_vblank.c
1348 @@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
1349 u32 vblank;
1350 unsigned long flags;
1351
1352 - WARN(!dev->driver->get_vblank_timestamp,
1353 - "This function requires support for accurate vblank timestamps.");
1354 + WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
1355 + "This function requires support for accurate vblank timestamps.");
1356
1357 spin_lock_irqsave(&dev->vblank_time_lock, flags);
1358
1359 @@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1360 assert_spin_locked(&dev->event_lock);
1361
1362 e->pipe = pipe;
1363 - e->event.sequence = drm_vblank_count(dev, pipe);
1364 + e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
1365 e->event.crtc_id = crtc->base.id;
1366 list_add_tail(&e->base.link, &dev->vblank_event_list);
1367 }
1368 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1369 index edd7d8127d19..c54806d08dd7 100644
1370 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1371 +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
1372 @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
1373 {
1374 struct drm_encoder *encoder = &fsl_dev->encoder;
1375 struct drm_connector *connector = &fsl_dev->connector.base;
1376 - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
1377 int ret;
1378
1379 fsl_dev->connector.encoder = encoder;
1380 @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
1381 if (ret < 0)
1382 goto err_sysfs;
1383
1384 - drm_object_property_set_value(&connector->base,
1385 - mode_config->dpms_property,
1386 - DRM_MODE_DPMS_OFF);
1387 -
1388 ret = drm_panel_attach(panel, connector);
1389 if (ret) {
1390 dev_err(fsl_dev->dev, "failed to attach panel\n");
1391 diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1392 index 9823477b1855..2269be91f3e1 100644
1393 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1394 +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
1395 @@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
1396 {
1397 struct ade_crtc *acrtc = to_ade_crtc(crtc);
1398 struct ade_hw_ctx *ctx = acrtc->ctx;
1399 + struct drm_display_mode *mode = &crtc->state->mode;
1400 + struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
1401
1402 if (!ctx->power_on)
1403 (void)ade_power_up(ctx);
1404 + ade_ldi_set_mode(acrtc, mode, adj_mode);
1405 }
1406
1407 static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
1408 diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
1409 index e6dfc3331f4b..a385838e2919 100644
1410 --- a/drivers/gpu/drm/i915/gvt/gtt.c
1411 +++ b/drivers/gpu/drm/i915/gvt/gtt.c
1412 @@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
1413
1414 #define GTT_HAW 46
1415
1416 -#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
1417 -#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
1418 -#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
1419 +#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
1420 +#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
1421 +#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
1422
1423 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
1424 {
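
Note: the GVT address-mask fix is pure bit arithmetic. With a 46-bit host address width, a 1GB-page entry carries address bits 45:30, i.e. 46 - 30 = 16 bits, so the old "+ 1" made every mask one bit too wide. Numerically:

#include <stdio.h>

#define GTT_HAW 46

int main(void)
{
	unsigned long old_1g = ((1UL << (GTT_HAW - 30 + 1)) - 1) << 30;
	unsigned long new_1g = ((1UL << (GTT_HAW - 30)) - 1) << 30;

	printf("old: %#lx\n", old_1g);	/* 0x7fffc0000000, spills into bit 46 */
	printf("new: %#lx\n", new_1g);	/* 0x3fffc0000000, bits 45:30 only    */
	return 0;
}
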
1425 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1426 index 9f45cfeae775..82498f8232eb 100644
1427 --- a/drivers/gpu/drm/i915/i915_drv.c
1428 +++ b/drivers/gpu/drm/i915/i915_drv.c
1429 @@ -2591,6 +2591,8 @@ static int intel_runtime_resume(struct device *kdev)
1430 ret = vlv_resume_prepare(dev_priv, true);
1431 }
1432
1433 + intel_uncore_runtime_resume(dev_priv);
1434 +
1435 /*
1436 * No point of rolling back things in case of an error, as the best
1437 * we can do is to hope that things will still work (and disable RPM).
1438 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
1439 index 262e75c00dd2..da2d309574ba 100644
1440 --- a/drivers/gpu/drm/i915/intel_fbdev.c
1441 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
1442 @@ -694,10 +694,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
1443
1444 /* Due to peculiar init order wrt to hpd handling this is separate. */
1445 if (drm_fb_helper_initial_config(&ifbdev->helper,
1446 - ifbdev->preferred_bpp)) {
1447 + ifbdev->preferred_bpp))
1448 intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
1449 - intel_fbdev_fini(to_i915(ifbdev->helper.dev));
1450 - }
1451 }
1452
1453 void intel_fbdev_initial_config_async(struct drm_device *dev)
1454 @@ -797,7 +795,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
1455 {
1456 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
1457
1458 - if (ifbdev)
1459 + if (!ifbdev)
1460 + return;
1461 +
1462 + intel_fbdev_sync(ifbdev);
1463 + if (ifbdev->vma)
1464 drm_fb_helper_hotplug_event(&ifbdev->helper);
1465 }
1466
1467 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1468 index e8abea7594ec..3fed1d3ecded 100644
1469 --- a/drivers/gpu/drm/i915/intel_hdmi.c
1470 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1471 @@ -481,7 +481,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
1472 crtc_state->limited_color_range ?
1473 HDMI_QUANTIZATION_RANGE_LIMITED :
1474 HDMI_QUANTIZATION_RANGE_FULL,
1475 - intel_hdmi->rgb_quant_range_selectable);
1476 + intel_hdmi->rgb_quant_range_selectable,
1477 + is_hdmi2_sink);
1478
1479 /* TODO: handle pixel repetition for YCBCR420 outputs */
1480 intel_write_infoframe(encoder, crtc_state, &frame);
1481 diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
1482 index eb5827110d8f..49fdf09f9919 100644
1483 --- a/drivers/gpu/drm/i915/intel_i2c.c
1484 +++ b/drivers/gpu/drm/i915/intel_i2c.c
1485 @@ -438,7 +438,9 @@ static bool
1486 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
1487 {
1488 return (i + 1 < num &&
1489 - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
1490 + msgs[i].addr == msgs[i + 1].addr &&
1491 + !(msgs[i].flags & I2C_M_RD) &&
1492 + (msgs[i].len == 1 || msgs[i].len == 2) &&
1493 (msgs[i + 1].flags & I2C_M_RD));
1494 }
1495
1496 diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
1497 index 1d7b879cc68c..e9ed02518406 100644
1498 --- a/drivers/gpu/drm/i915/intel_uncore.c
1499 +++ b/drivers/gpu/drm/i915/intel_uncore.c
1500 @@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
1501 i915_check_and_clear_faults(dev_priv);
1502 }
1503
1504 +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
1505 +{
1506 + iosf_mbi_register_pmic_bus_access_notifier(
1507 + &dev_priv->uncore.pmic_bus_access_nb);
1508 +}
1509 +
1510 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
1511 {
1512 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
1513 @@ -1171,8 +1177,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1514 * bus, which will be busy after this notification, leading to:
1515 * "render: timed out waiting for forcewake ack request."
1516 * errors.
1517 + *
1518 + * The notifier is unregistered during intel_runtime_suspend(),
1519 + * so it's ok to access the HW here without holding a RPM
1520 + * wake reference -> disable wakeref asserts for the time of
1521 + * the access.
1522 */
1523 + disable_rpm_wakeref_asserts(dev_priv);
1524 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1525 + enable_rpm_wakeref_asserts(dev_priv);
1526 break;
1527 case MBI_PMIC_BUS_ACCESS_END:
1528 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1529 diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
1530 index 5f90278da461..0bdc3fcc0e64 100644
1531 --- a/drivers/gpu/drm/i915/intel_uncore.h
1532 +++ b/drivers/gpu/drm/i915/intel_uncore.h
1533 @@ -121,6 +121,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
1534 void intel_uncore_fini(struct drm_i915_private *dev_priv);
1535 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
1536 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
1537 +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
1538
1539 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
1540 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
1541 diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
1542 index daf286fc8a40..ca1e3b489540 100644
1543 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c
1544 +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
1545 @@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
1546 }
1547
1548 static const struct soc_device_attribute dpi_soc_devices[] = {
1549 - { .family = "OMAP3[456]*" },
1550 - { .family = "[AD]M37*" },
1551 + { .machine = "OMAP3[456]*" },
1552 + { .machine = "[AD]M37*" },
1553 { /* sentinel */ }
1554 };
1555
1556 diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1557 index 365cf07daa01..c3453f3bd603 100644
1558 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1559 +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
1560 @@ -889,25 +889,36 @@ struct hdmi4_features {
1561 bool audio_use_mclk;
1562 };
1563
1564 -static const struct hdmi4_features hdmi4_es1_features = {
1565 +static const struct hdmi4_features hdmi4430_es1_features = {
1566 .cts_swmode = false,
1567 .audio_use_mclk = false,
1568 };
1569
1570 -static const struct hdmi4_features hdmi4_es2_features = {
1571 +static const struct hdmi4_features hdmi4430_es2_features = {
1572 .cts_swmode = true,
1573 .audio_use_mclk = false,
1574 };
1575
1576 -static const struct hdmi4_features hdmi4_es3_features = {
1577 +static const struct hdmi4_features hdmi4_features = {
1578 .cts_swmode = true,
1579 .audio_use_mclk = true,
1580 };
1581
1582 static const struct soc_device_attribute hdmi4_soc_devices[] = {
1583 - { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
1584 - { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
1585 - { .family = "OMAP4", .data = &hdmi4_es3_features },
1586 + {
1587 + .machine = "OMAP4430",
1588 + .revision = "ES1.?",
1589 + .data = &hdmi4430_es1_features,
1590 + },
1591 + {
1592 + .machine = "OMAP4430",
1593 + .revision = "ES2.?",
1594 + .data = &hdmi4430_es2_features,
1595 + },
1596 + {
1597 + .family = "OMAP4",
1598 + .data = &hdmi4_features,
1599 + },
1600 { /* sentinel */ }
1601 };
1602
1603 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
1604 index 474fa759e06e..234af81fb3d0 100644
1605 --- a/drivers/gpu/drm/panel/panel-simple.c
1606 +++ b/drivers/gpu/drm/panel/panel-simple.c
1607 @@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
1608 drm_panel_remove(&panel->base);
1609
1610 panel_simple_disable(&panel->base);
1611 + panel_simple_unprepare(&panel->base);
1612
1613 if (panel->ddc)
1614 put_device(&panel->ddc->dev);
1615 @@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
1616 struct panel_simple *panel = dev_get_drvdata(dev);
1617
1618 panel_simple_disable(&panel->base);
1619 + panel_simple_unprepare(&panel->base);
1620 }
1621
1622 static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
1623 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1624 index 432cb46f6a34..fd7682bf335d 100644
1625 --- a/drivers/gpu/drm/radeon/atombios_dp.c
1626 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1627 @@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
1628
1629 /***** radeon AUX functions *****/
1630
1631 -/* Atom needs data in little endian format
1632 - * so swap as appropriate when copying data to
1633 - * or from atom. Note that atom operates on
1634 - * dw units.
1635 +/* Atom needs data in little endian format so swap as appropriate when copying
1636 + * data to or from atom. Note that atom operates on dw units.
1637 + *
1638 + * Use to_le=true when sending data to atom and provide at least
1639 + * ALIGN(num_bytes,4) bytes in the dst buffer.
1640 + *
1641 + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
1642 + * bytes in the src buffer.
1643 */
1644 void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
1645 {
1646 #ifdef __BIG_ENDIAN
1647 - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
1648 - u32 *dst32, *src32;
1649 + u32 src_tmp[5], dst_tmp[5];
1650 int i;
1651 + u8 align_num_bytes = ALIGN(num_bytes, 4);
1652
1653 - memcpy(src_tmp, src, num_bytes);
1654 - src32 = (u32 *)src_tmp;
1655 - dst32 = (u32 *)dst_tmp;
1656 if (to_le) {
1657 - for (i = 0; i < ((num_bytes + 3) / 4); i++)
1658 - dst32[i] = cpu_to_le32(src32[i]);
1659 - memcpy(dst, dst_tmp, num_bytes);
1660 + memcpy(src_tmp, src, num_bytes);
1661 + for (i = 0; i < align_num_bytes / 4; i++)
1662 + dst_tmp[i] = cpu_to_le32(src_tmp[i]);
1663 + memcpy(dst, dst_tmp, align_num_bytes);
1664 } else {
1665 - u8 dws = num_bytes & ~3;
1666 - for (i = 0; i < ((num_bytes + 3) / 4); i++)
1667 - dst32[i] = le32_to_cpu(src32[i]);
1668 - memcpy(dst, dst_tmp, dws);
1669 - if (num_bytes % 4) {
1670 - for (i = 0; i < (num_bytes % 4); i++)
1671 - dst[dws+i] = dst_tmp[dws+i];
1672 - }
1673 + memcpy(src_tmp, src, align_num_bytes);
1674 + for (i = 0; i < align_num_bytes / 4; i++)
1675 + dst_tmp[i] = le32_to_cpu(src_tmp[i]);
1676 + memcpy(dst, dst_tmp, num_bytes);
1677 }
1678 #else
1679 memcpy(dst, src, num_bytes);
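The rework above tightens the big-endian path: both temporaries become properly sized u32 arrays and every copy uses one ALIGN(num_bytes, 4) size. A minimal userspace sketch of the resulting logic, assuming a big-endian host for the swap (on little-endian the kernel path is a plain memcpy); ALIGN() is open-coded and swap32() stands in for the cpu_to_le32()/le32_to_cpu() pair:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(n)	(((n) + 3u) & ~3u)

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static void copy_swap(uint8_t *dst, const uint8_t *src,
		      uint8_t num_bytes, int to_le)
{
	uint32_t src_tmp[5] = { 0 }, dst_tmp[5];	/* 20-byte worst case */
	uint8_t align_num_bytes = ALIGN4(num_bytes);
	unsigned int i;

	if (to_le) {
		/* caller guarantees dst has room for the aligned size */
		memcpy(src_tmp, src, num_bytes);
		for (i = 0; i < align_num_bytes / 4u; i++)
			dst_tmp[i] = swap32(src_tmp[i]);
		memcpy(dst, dst_tmp, align_num_bytes);
	} else {
		/* atom-provided src is readable up to the aligned size */
		memcpy(src_tmp, src, align_num_bytes);
		for (i = 0; i < align_num_bytes / 4u; i++)
			dst_tmp[i] = swap32(src_tmp[i]);
		memcpy(dst, dst_tmp, num_bytes);
	}
}

int main(void)
{
	uint8_t in[6] = { 1, 2, 3, 4, 5, 6 }, out[8];
	int i;

	copy_swap(out, in, 6, 1);	/* writes ALIGN(6, 4) = 8 bytes */
	for (i = 0; i < 8; i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}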
1680 diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
1681 index fd25361ac681..4ef967d1a9de 100644
1682 --- a/drivers/gpu/drm/radeon/radeon_fb.c
1683 +++ b/drivers/gpu/drm/radeon/radeon_fb.c
1684 @@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
1685 }
1686
1687 info->par = rfbdev;
1688 - info->skip_vt_switch = true;
1689
1690 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
1691 if (ret) {
1692 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1693 index 406fe4544b83..06d6e785c920 100644
1694 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1695 +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
1696 @@ -24,6 +24,7 @@
1697 #include <linux/completion.h>
1698 #include <linux/dma-mapping.h>
1699 #include <linux/of_graph.h>
1700 +#include <linux/math64.h>
1701
1702 #include "tilcdc_drv.h"
1703 #include "tilcdc_regs.h"
1704 @@ -48,6 +49,7 @@ struct tilcdc_crtc {
1705 unsigned int lcd_fck_rate;
1706
1707 ktime_t last_vblank;
1708 + unsigned int hvtotal_us;
1709
1710 struct drm_framebuffer *curr_fb;
1711 struct drm_framebuffer *next_fb;
1712 @@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
1713 LCDC_V2_CORE_CLK_EN);
1714 }
1715
1716 +uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
1717 +{
1718 + return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
1719 + mode->clock);
1720 +}
1721 +
1722 static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
1723 {
1724 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
1725 @@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
1726 drm_framebuffer_reference(fb);
1727
1728 crtc->hwmode = crtc->state->adjusted_mode;
1729 +
1730 + tilcdc_crtc->hvtotal_us =
1731 + tilcdc_mode_hvtotal(&crtc->hwmode);
1732 }
1733
1734 static void tilcdc_crtc_enable(struct drm_crtc *crtc)
1735 @@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
1736 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
1737
1738 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
1739 - 1000000 / crtc->hwmode.vrefresh);
1740 + tilcdc_crtc->hvtotal_us);
1741 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
1742
1743 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
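The precalculated hvtotal_us above replaces 1000000 / vrefresh with the exact frame period derived from the mode timings, avoiding the rounding baked into the integer vrefresh. A standalone check of the arithmetic, using illustrative CEA 1080p60 timings that are not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mode->clock is in kHz, so 1000 * htotal * vtotal / clock
	 * yields microseconds per frame */
	uint64_t htotal = 2200, vtotal = 1125, clock_khz = 148500;
	uint64_t hvtotal_us = (1000ULL * htotal * vtotal) / clock_khz;

	printf("%llu us/frame\n", (unsigned long long)hvtotal_us);
	/* prints 16666; the old 1000000 / vrefresh path only matches
	 * when vrefresh happens to divide evenly */
	return 0;
}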
1744 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1745 index 180ce6296416..c088703777e2 100644
1746 --- a/drivers/gpu/drm/ttm/ttm_bo.c
1747 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
1748 @@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
1749 ttm_tt_destroy(bo->ttm);
1750 atomic_dec(&bo->glob->bo_count);
1751 dma_fence_put(bo->moving);
1752 - if (bo->resv == &bo->ttm_resv)
1753 - reservation_object_fini(&bo->ttm_resv);
1754 + reservation_object_fini(&bo->ttm_resv);
1755 mutex_destroy(&bo->wu_mutex);
1756 if (bo->destroy)
1757 bo->destroy(bo);
1758 @@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
1759 if (bo->resv == &bo->ttm_resv)
1760 return 0;
1761
1762 - reservation_object_init(&bo->ttm_resv);
1763 BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
1764
1765 r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
1766 - if (r) {
1767 + if (r)
1768 reservation_object_unlock(&bo->ttm_resv);
1769 - reservation_object_fini(&bo->ttm_resv);
1770 - }
1771
1772 return r;
1773 }
1774 @@ -440,28 +436,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
1775 struct ttm_bo_global *glob = bo->glob;
1776 int ret;
1777
1778 + ret = ttm_bo_individualize_resv(bo);
1779 + if (ret) {
1780 + /* Last resort: if we fail to allocate memory for the
1781 + * fences, block for the BO to become idle
1782 + */
1783 + reservation_object_wait_timeout_rcu(bo->resv, true, false,
1784 + 30 * HZ);
1785 + spin_lock(&glob->lru_lock);
1786 + goto error;
1787 + }
1788 +
1789 spin_lock(&glob->lru_lock);
1790 ret = __ttm_bo_reserve(bo, false, true, NULL);
1791 -
1792 if (!ret) {
1793 - if (!ttm_bo_wait(bo, false, true)) {
1794 + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
1795 ttm_bo_del_from_lru(bo);
1796 spin_unlock(&glob->lru_lock);
1797 - ttm_bo_cleanup_memtype_use(bo);
1798 + if (bo->resv != &bo->ttm_resv)
1799 + reservation_object_unlock(&bo->ttm_resv);
1800
1801 - return;
1802 - }
1803 -
1804 - ret = ttm_bo_individualize_resv(bo);
1805 - if (ret) {
1806 - /* Last resort, if we fail to allocate memory for the
1807 - * fences block for the BO to become idle and free it.
1808 - */
1809 - spin_unlock(&glob->lru_lock);
1810 - ttm_bo_wait(bo, true, true);
1811 ttm_bo_cleanup_memtype_use(bo);
1812 return;
1813 }
1814 +
1815 ttm_bo_flush_all_fences(bo);
1816
1817 /*
1818 @@ -474,11 +472,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
1819 ttm_bo_add_to_lru(bo);
1820 }
1821
1822 - if (bo->resv != &bo->ttm_resv)
1823 - reservation_object_unlock(&bo->ttm_resv);
1824 __ttm_bo_unreserve(bo);
1825 }
1826 + if (bo->resv != &bo->ttm_resv)
1827 + reservation_object_unlock(&bo->ttm_resv);
1828
1829 +error:
1830 kref_get(&bo->list_kref);
1831 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
1832 spin_unlock(&glob->lru_lock);
1833 @@ -1203,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1834 lockdep_assert_held(&bo->resv->lock.base);
1835 } else {
1836 bo->resv = &bo->ttm_resv;
1837 - reservation_object_init(&bo->ttm_resv);
1838 }
1839 + reservation_object_init(&bo->ttm_resv);
1840 atomic_inc(&bo->glob->bo_count);
1841 drm_vma_node_reset(&bo->vma_node);
1842 bo->priority = 0;
1843 diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
1844 index c934ad5b3903..7c2fbdbbd048 100644
1845 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
1846 +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
1847 @@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
1848 INIT_LIST_HEAD(&fbo->lru);
1849 INIT_LIST_HEAD(&fbo->swap);
1850 INIT_LIST_HEAD(&fbo->io_reserve_lru);
1851 + mutex_init(&fbo->wu_mutex);
1852 fbo->moving = NULL;
1853 drm_vma_node_reset(&fbo->vma_node);
1854 atomic_set(&fbo->cpu_writers, 0);
1855 diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
1856 index 937da8dd65b8..8f71157a2b06 100644
1857 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c
1858 +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
1859 @@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
1860 vc4_encoder->limited_rgb_range ?
1861 HDMI_QUANTIZATION_RANGE_LIMITED :
1862 HDMI_QUANTIZATION_RANGE_FULL,
1863 - vc4_encoder->rgb_range_selectable);
1864 + vc4_encoder->rgb_range_selectable,
1865 + false);
1866
1867 vc4_hdmi_write_infoframe(encoder, &frame);
1868 }
1869 diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
1870 index 5f11dc014ed6..e5234f953a6d 100644
1871 --- a/drivers/hwmon/jc42.c
1872 +++ b/drivers/hwmon/jc42.c
1873 @@ -22,6 +22,7 @@
1874 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1875 */
1876
1877 +#include <linux/bitops.h>
1878 #include <linux/module.h>
1879 #include <linux/init.h>
1880 #include <linux/slab.h>
1881 @@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
1882 #define JC42_REG_TEMP 0x05
1883 #define JC42_REG_MANID 0x06
1884 #define JC42_REG_DEVICEID 0x07
1885 +#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */
1886
1887 /* Status bits in temperature register */
1888 #define JC42_ALARM_CRIT_BIT 15
1889 @@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
1890 #define GT_MANID 0x1c68 /* Giantec */
1891 #define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */
1892
1893 +/* SMBUS register */
1894 +#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */
1895 +
1896 /* Supported chips */
1897
1898 /* Analog Devices */
1899 @@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
1900
1901 data->extended = !!(cap & JC42_CAP_RANGE);
1902
1903 + if (device_property_read_bool(dev, "smbus-timeout-disable")) {
1904 + int smbus;
1905 +
1906 + /*
1907 + * Not all chips support this register, but from a
1908 + * quick read of various datasheets no chip appears
1909 + * incompatible with the below attempt to disable
1910 + * the timeout. And the whole thing is opt-in...
1911 + */
1912 + smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
1913 + if (smbus < 0)
1914 + return smbus;
1915 + i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
1916 + smbus | SMBUS_STMOUT);
1917 + }
1918 +
1919 config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
1920 if (config < 0)
1921 return config;
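The probe hunk above is an opt-in read-modify-write of the vendor SMBus register. A compilable sketch with the register simulated in memory; the JC42_* names follow the patch, while the smbus_* helpers are stand-ins for i2c_smbus_read_word_swapped()/i2c_smbus_write_word_swapped():

#include <stdint.h>
#include <stdio.h>

#define JC42_REG_SMBUS	0x22
#define SMBUS_STMOUT	(1u << 7)	/* SMBus time-out, active low */

static uint16_t reg22;			/* simulated chip register */

static int smbus_read_word(uint8_t reg)  { (void)reg; return reg22; }
static int smbus_write_word(uint8_t reg, uint16_t val)
{
	(void)reg;
	reg22 = val;
	return 0;
}

static int jc42_disable_timeout(void)
{
	int smbus = smbus_read_word(JC42_REG_SMBUS);

	if (smbus < 0)		/* chip without register 0x22 */
		return smbus;
	/* bit is active low, so setting it disables the timeout */
	return smbus_write_word(JC42_REG_SMBUS, smbus | SMBUS_STMOUT);
}

int main(void)
{
	jc42_disable_timeout();
	printf("reg 0x22 = 0x%04x\n", reg22);	/* bit 7 now set */
	return 0;
}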
1922 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1923 index 9e12a53ef7b8..8eac00efadc1 100644
1924 --- a/drivers/i2c/busses/i2c-i801.c
1925 +++ b/drivers/i2c/busses/i2c-i801.c
1926 @@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1927 /* Default timeout in interrupt mode: 200 ms */
1928 priv->adapter.timeout = HZ / 5;
1929
1930 + if (dev->irq == IRQ_NOTCONNECTED)
1931 + priv->features &= ~FEATURE_IRQ;
1932 +
1933 if (priv->features & FEATURE_IRQ) {
1934 u16 pcictl, pcists;
1935
1936 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
1937 index 21e60b1e2ff4..130606c3b07c 100644
1938 --- a/drivers/infiniband/core/umem.c
1939 +++ b/drivers/infiniband/core/umem.c
1940 @@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
1941 sg_list_start = umem->sg_head.sgl;
1942
1943 while (npages) {
1944 - ret = get_user_pages(cur_base,
1945 + ret = get_user_pages_longterm(cur_base,
1946 min_t(unsigned long, npages,
1947 PAGE_SIZE / sizeof (struct page *)),
1948 gup_flags, page_list, vma_list);
1949 diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1950 index c1696e6084b2..603acaf91828 100644
1951 --- a/drivers/infiniband/core/user_mad.c
1952 +++ b/drivers/infiniband/core/user_mad.c
1953 @@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
1954 packet->mad.hdr.status = 0;
1955 packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
1956 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
1957 - packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
1958 + /*
1959 + * On OPA devices it is okay to lose the upper 16 bits of LID as this
1960 + * information is obtained elsewhere. Mask off the upper 16 bits.
1961 + */
1962 + if (agent->device->port_immutable[agent->port_num].core_cap_flags &
1963 + RDMA_CORE_PORT_INTEL_OPA)
1964 + packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
1965 + mad_recv_wc->wc->slid);
1966 + else
1967 + packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
1968 packet->mad.hdr.sl = mad_recv_wc->wc->sl;
1969 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
1970 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
1971 diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
1972 index f4c0ffc040cc..07b80faf1675 100644
1973 --- a/drivers/infiniband/hw/hfi1/mad.c
1974 +++ b/drivers/infiniband/hw/hfi1/mad.c
1975 @@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
1976 const struct ib_wc *in_wc)
1977 {
1978 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1979 - u16 slid = ib_lid_cpu16(in_wc->slid);
1980 u16 pkey;
1981
1982 if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
1983 @@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
1984 */
1985 if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
1986 return 0;
1987 - ingress_pkey_table_fail(ppd, pkey, slid);
1988 + /*
1989 + * On OPA devices it is okay to lose the upper 16 bits of LID as this
1990 + * information is obtained elsewhere. Mask off the upper 16 bits.
1991 + */
1992 + ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
1993 return 1;
1994 }
1995
1996 diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
1997 index c9934139d609..934b1fce4ce1 100644
1998 --- a/drivers/md/bcache/alloc.c
1999 +++ b/drivers/md/bcache/alloc.c
2000 @@ -480,7 +480,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
2001 if (b == -1)
2002 goto err;
2003
2004 - k->ptr[i] = PTR(ca->buckets[b].gen,
2005 + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
2006 bucket_to_sector(c, b),
2007 ca->sb.nr_this_dev);
2008
2009 diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
2010 index 41c238fc3733..f9d391711595 100644
2011 --- a/drivers/md/bcache/extents.c
2012 +++ b/drivers/md/bcache/extents.c
2013 @@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
2014 return false;
2015
2016 for (i = 0; i < KEY_PTRS(l); i++)
2017 - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
2018 + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
2019 PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
2020 return false;
2021
2022 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
2023 index 02a98ddb592d..03cc0722ae48 100644
2024 --- a/drivers/md/bcache/journal.c
2025 +++ b/drivers/md/bcache/journal.c
2026 @@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c)
2027 continue;
2028
2029 ja->cur_idx = next;
2030 - k->ptr[n++] = PTR(0,
2031 + k->ptr[n++] = MAKE_PTR(0,
2032 bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
2033 ca->sb.nr_this_dev);
2034 }
2035 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
2036 index 3475d6628e21..14d13cab5cda 100644
2037 --- a/drivers/md/bcache/request.c
2038 +++ b/drivers/md/bcache/request.c
2039 @@ -699,7 +699,14 @@ static void cached_dev_read_error(struct closure *cl)
2040 struct search *s = container_of(cl, struct search, cl);
2041 struct bio *bio = &s->bio.bio;
2042
2043 - if (s->recoverable) {
2044 + /*
2045 + * If the read request hit dirty data (s->read_dirty_data is true),
2046 + * then recovering a failed read request from the cached device may
2047 + * return stale data. So read failure recovery is only permitted
2048 + * when the read request hit clean data in the cache device, or
2049 + * when a cache read race happened.
2050 + */
2051 + if (s->recoverable && !s->read_dirty_data) {
2052 /* Retry from the backing device: */
2053 trace_bcache_read_retry(s->orig_bio);
2054
2055 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
2056 index cae57b5be817..f425905c97fa 100644
2057 --- a/drivers/md/bitmap.c
2058 +++ b/drivers/md/bitmap.c
2059 @@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
2060
2061 BUG_ON(file && mddev->bitmap_info.offset);
2062
2063 + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
2064 + pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
2065 + mdname(mddev));
2066 + return ERR_PTR(-EBUSY);
2067 + }
2068 +
2069 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
2070 if (!bitmap)
2071 return ERR_PTR(-ENOMEM);
2072 diff --git a/drivers/md/md.c b/drivers/md/md.c
2073 index e019cf8c0d13..98ea86309ceb 100644
2074 --- a/drivers/md/md.c
2075 +++ b/drivers/md/md.c
2076 @@ -6362,7 +6362,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
2077 break;
2078 }
2079 }
2080 - if (has_journal) {
2081 + if (has_journal || mddev->bitmap) {
2082 export_rdev(rdev);
2083 return -EBUSY;
2084 }
2085 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2086 index 928e24a07133..7aed69a4f655 100644
2087 --- a/drivers/md/raid5.c
2088 +++ b/drivers/md/raid5.c
2089 @@ -7156,6 +7156,13 @@ static int raid5_run(struct mddev *mddev)
2090 min_offset_diff = diff;
2091 }
2092
2093 + if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
2094 + (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
2095 + pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
2096 + mdname(mddev));
2097 + return -EINVAL;
2098 + }
2099 +
2100 if (mddev->reshape_position != MaxSector) {
2101 /* Check that we can continue the reshape.
2102 * Difficulties arise if the stripe we would write to
2103 diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
2104 index 0b5c43f7e020..f412429cf5ba 100644
2105 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c
2106 +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
2107 @@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
2108 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
2109 data, size, dma->nr_pages);
2110
2111 - err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
2112 + err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
2113 flags, dma->pages, NULL);
2114
2115 if (err != dma->nr_pages) {
2116 dma->nr_pages = (err >= 0) ? err : 0;
2117 - dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
2118 + dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
2119 + dma->nr_pages);
2120 return err < 0 ? err : -EINVAL;
2121 }
2122 return 0;
2123 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
2124 index 3ba04f371380..81093f8157a9 100644
2125 --- a/drivers/misc/cxl/pci.c
2126 +++ b/drivers/misc/cxl/pci.c
2127 @@ -2043,6 +2043,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
2128 /* There should only be one entry, but go through the list
2129 * anyway
2130 */
2131 + if (afu->phb == NULL)
2132 + return result;
2133 +
2134 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2135 if (!afu_dev->driver)
2136 continue;
2137 @@ -2084,8 +2087,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
2138 * Tell the AFU drivers; but we don't care what they
2139 * say, we're going away.
2140 */
2141 - if (afu->phb != NULL)
2142 - cxl_vphb_error_detected(afu, state);
2143 + cxl_vphb_error_detected(afu, state);
2144 }
2145 return PCI_ERS_RESULT_DISCONNECT;
2146 }
2147 @@ -2225,6 +2227,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
2148 if (cxl_afu_select_best_mode(afu))
2149 goto err;
2150
2151 + if (afu->phb == NULL)
2152 + continue;
2153 +
2154 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2155 /* Reset the device context.
2156 * TODO: make this less disruptive
2157 @@ -2287,6 +2292,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
2158 for (i = 0; i < adapter->slices; i++) {
2159 afu = adapter->afu[i];
2160
2161 + if (afu->phb == NULL)
2162 + continue;
2163 +
2164 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2165 if (afu_dev->driver && afu_dev->driver->err_handler &&
2166 afu_dev->driver->err_handler->resume)
2167 diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
2168 index 764ff5df0dbc..372b2060fbba 100644
2169 --- a/drivers/misc/eeprom/at24.c
2170 +++ b/drivers/misc/eeprom/at24.c
2171 @@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
2172 memset(msg, 0, sizeof(msg));
2173 msg[0].addr = client->addr;
2174 msg[0].buf = addrbuf;
2175 - addrbuf[0] = 0x90 + offset;
2176 + /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
2177 + addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
2178 msg[0].len = 1;
2179 msg[1].addr = client->addr;
2180 msg[1].flags = I2C_M_RD;
2181 @@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
2182 if (unlikely(!count))
2183 return count;
2184
2185 + if (off + count > at24->chip.byte_len)
2186 + return -EINVAL;
2187 +
2188 /*
2189 * Read data from chip, protecting against concurrent updates
2190 * from this host, but not from other I2C masters.
2191 @@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
2192 if (unlikely(!count))
2193 return -EINVAL;
2194
2195 + if (off + count > at24->chip.byte_len)
2196 + return -EINVAL;
2197 +
2198 /*
2199 * Write data to chip, protecting against concurrent updates
2200 * from this host, but not from other I2C masters.
2201 @@ -631,6 +638,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
2202 dev_warn(&client->dev,
2203 "page_size looks suspicious (no power of 2)!\n");
2204
2205 + /*
2206 + * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
2207 + * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
2208 + *
2209 + * Eventually we'll get rid of the magic values altogether in favor of
2210 + * real structs, but for now just manually set the right size.
2211 + */
2212 + if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
2213 + chip.byte_len = 6;
2214 +
2215 /* Use I2C operations unless we're stuck with SMBus extensions. */
2216 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
2217 if (chip.flags & AT24_FLAG_ADDR16)
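The addrbuf[0] rewrite above derives the MAC dummy-write address from the array length instead of hard-coding 0x90: with offset 0, 0xa0 - byte_len gives 0x9a for a 6-byte EUI-48 and 0x98 for an 8-byte EUI-64, as the new comment says. Recomputing it standalone:

#include <stdio.h>

int main(void)
{
	unsigned int lens[] = { 6, 8 };		/* EUI-48, EUI-64 */
	int i;

	for (i = 0; i < 2; i++)
		printf("byte_len=%u -> addrbuf[0]=0x%02x\n",
		       lens[i], 0xa0 - lens[i]);
	/* prints 0x9a for 6 bytes and 0x98 for 8 bytes */
	return 0;
}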
2218 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2219 index 2ad7b5c69156..ccb516f18d72 100644
2220 --- a/drivers/mmc/core/block.c
2221 +++ b/drivers/mmc/core/block.c
2222 @@ -119,6 +119,10 @@ struct mmc_blk_data {
2223 struct device_attribute force_ro;
2224 struct device_attribute power_ro_lock;
2225 int area_type;
2226 +
2227 + /* debugfs files (only in main mmc_blk_data) */
2228 + struct dentry *status_dentry;
2229 + struct dentry *ext_csd_dentry;
2230 };
2231
2232 static DEFINE_MUTEX(open_lock);
2233 @@ -204,9 +208,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
2234
2235 /* Dispatch locking to the block layer */
2236 req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
2237 + if (IS_ERR(req)) {
2238 + count = PTR_ERR(req);
2239 + goto out_put;
2240 + }
2241 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
2242 blk_execute_rq(mq->queue, NULL, req, 0);
2243 ret = req_to_mmc_queue_req(req)->drv_op_result;
2244 + blk_put_request(req);
2245
2246 if (!ret) {
2247 pr_info("%s: Locking boot partition ro until next power on\n",
2248 @@ -219,7 +228,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
2249 set_disk_ro(part_md->disk, 1);
2250 }
2251 }
2252 -
2253 +out_put:
2254 mmc_blk_put(md);
2255 return count;
2256 }
2257 @@ -580,6 +589,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
2258 req = blk_get_request(mq->queue,
2259 idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
2260 __GFP_RECLAIM);
2261 + if (IS_ERR(req)) {
2262 + err = PTR_ERR(req);
2263 + goto cmd_done;
2264 + }
2265 idatas[0] = idata;
2266 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
2267 req_to_mmc_queue_req(req)->drv_op_data = idatas;
2268 @@ -643,6 +656,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
2269 req = blk_get_request(mq->queue,
2270 idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
2271 __GFP_RECLAIM);
2272 + if (IS_ERR(req)) {
2273 + err = PTR_ERR(req);
2274 + goto cmd_err;
2275 + }
2276 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
2277 req_to_mmc_queue_req(req)->drv_op_data = idata;
2278 req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
2279 @@ -2314,6 +2331,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
2280
2281 /* Ask the block layer about the card status */
2282 req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2283 + if (IS_ERR(req))
2284 + return PTR_ERR(req);
2285 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
2286 blk_execute_rq(mq->queue, NULL, req, 0);
2287 ret = req_to_mmc_queue_req(req)->drv_op_result;
2288 @@ -2321,6 +2340,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
2289 *val = ret;
2290 ret = 0;
2291 }
2292 + blk_put_request(req);
2293
2294 return ret;
2295 }
2296 @@ -2347,10 +2367,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
2297
2298 /* Ask the block layer for the EXT CSD */
2299 req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
2300 + if (IS_ERR(req)) {
2301 + err = PTR_ERR(req);
2302 + goto out_free;
2303 + }
2304 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
2305 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2306 blk_execute_rq(mq->queue, NULL, req, 0);
2307 err = req_to_mmc_queue_req(req)->drv_op_result;
2308 + blk_put_request(req);
2309 if (err) {
2310 pr_err("FAILED %d\n", err);
2311 goto out_free;
2312 @@ -2396,7 +2421,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
2313 .llseek = default_llseek,
2314 };
2315
2316 -static int mmc_blk_add_debugfs(struct mmc_card *card)
2317 +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2318 {
2319 struct dentry *root;
2320
2321 @@ -2406,28 +2431,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card)
2322 root = card->debugfs_root;
2323
2324 if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2325 - if (!debugfs_create_file("status", S_IRUSR, root, card,
2326 - &mmc_dbg_card_status_fops))
2327 + md->status_dentry =
2328 + debugfs_create_file("status", S_IRUSR, root, card,
2329 + &mmc_dbg_card_status_fops);
2330 + if (!md->status_dentry)
2331 return -EIO;
2332 }
2333
2334 if (mmc_card_mmc(card)) {
2335 - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
2336 - &mmc_dbg_ext_csd_fops))
2337 + md->ext_csd_dentry =
2338 + debugfs_create_file("ext_csd", S_IRUSR, root, card,
2339 + &mmc_dbg_ext_csd_fops);
2340 + if (!md->ext_csd_dentry)
2341 return -EIO;
2342 }
2343
2344 return 0;
2345 }
2346
2347 +static void mmc_blk_remove_debugfs(struct mmc_card *card,
2348 + struct mmc_blk_data *md)
2349 +{
2350 + if (!card->debugfs_root)
2351 + return;
2352 +
2353 + if (!IS_ERR_OR_NULL(md->status_dentry)) {
2354 + debugfs_remove(md->status_dentry);
2355 + md->status_dentry = NULL;
2356 + }
2357 +
2358 + if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
2359 + debugfs_remove(md->ext_csd_dentry);
2360 + md->ext_csd_dentry = NULL;
2361 + }
2362 +}
2363
2364 #else
2365
2366 -static int mmc_blk_add_debugfs(struct mmc_card *card)
2367 +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2368 {
2369 return 0;
2370 }
2371
2372 +static void mmc_blk_remove_debugfs(struct mmc_card *card,
2373 + struct mmc_blk_data *md)
2374 +{
2375 +}
2376 +
2377 #endif /* CONFIG_DEBUG_FS */
2378
2379 static int mmc_blk_probe(struct mmc_card *card)
2380 @@ -2467,7 +2517,7 @@ static int mmc_blk_probe(struct mmc_card *card)
2381 }
2382
2383 /* Add two debugfs entries */
2384 - mmc_blk_add_debugfs(card);
2385 + mmc_blk_add_debugfs(card, md);
2386
2387 pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2388 pm_runtime_use_autosuspend(&card->dev);
2389 @@ -2493,6 +2543,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2390 {
2391 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2392
2393 + mmc_blk_remove_debugfs(card, md);
2394 mmc_blk_remove_parts(card, md);
2395 pm_runtime_get_sync(&card->dev);
2396 mmc_claim_host(card->host);
2397 diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
2398 index 301246513a37..7f428e387de3 100644
2399 --- a/drivers/mmc/core/bus.c
2400 +++ b/drivers/mmc/core/bus.c
2401 @@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
2402 return ret;
2403
2404 ret = host->bus_ops->suspend(host);
2405 + if (ret)
2406 + pm_generic_resume(dev);
2407 +
2408 return ret;
2409 }
2410
2411 diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
2412 index 01e459a34f33..0f4a7d7b2626 100644
2413 --- a/drivers/mmc/core/debugfs.c
2414 +++ b/drivers/mmc/core/debugfs.c
2415 @@ -314,4 +314,5 @@ void mmc_add_card_debugfs(struct mmc_card *card)
2416 void mmc_remove_card_debugfs(struct mmc_card *card)
2417 {
2418 debugfs_remove_recursive(card->debugfs_root);
2419 + card->debugfs_root = NULL;
2420 }
2421 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
2422 index 36217ad5e9b1..bad5c1bf4ed9 100644
2423 --- a/drivers/mmc/core/mmc.c
2424 +++ b/drivers/mmc/core/mmc.c
2425 @@ -780,7 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
2426 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
2427 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
2428 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
2429 -MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
2430 +MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
2431 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
2432 card->ext_csd.device_life_time_est_typ_a,
2433 card->ext_csd.device_life_time_est_typ_b);
2434 @@ -790,7 +790,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
2435 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
2436 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
2437 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
2438 -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
2439 +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
2440 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
2441
2442 static ssize_t mmc_fwrev_show(struct device *dev,
2443 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
2444 index 4fd1620b732d..eb9de2134967 100644
2445 --- a/drivers/mmc/core/sd.c
2446 +++ b/drivers/mmc/core/sd.c
2447 @@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
2448 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
2449 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
2450 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
2451 -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
2452 +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
2453
2454
2455 static ssize_t mmc_dsr_show(struct device *dev,
2456 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2457 index 0d5fcca18c9e..6152e83ff935 100644
2458 --- a/drivers/mmc/host/sdhci.c
2459 +++ b/drivers/mmc/host/sdhci.c
2460 @@ -21,6 +21,7 @@
2461 #include <linux/dma-mapping.h>
2462 #include <linux/slab.h>
2463 #include <linux/scatterlist.h>
2464 +#include <linux/swiotlb.h>
2465 #include <linux/regulator/consumer.h>
2466 #include <linux/pm_runtime.h>
2467 #include <linux/of.h>
2468 @@ -3650,23 +3651,30 @@ int sdhci_setup_host(struct sdhci_host *host)
2469
2470 spin_lock_init(&host->lock);
2471
2472 + /*
2473 + * Maximum number of sectors in one transfer. Limited by SDMA boundary
2474 + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
2475 + * is less anyway.
2476 + */
2477 + mmc->max_req_size = 524288;
2478 +
2479 /*
2480 * Maximum number of segments. Depends on if the hardware
2481 * can do scatter/gather or not.
2482 */
2483 - if (host->flags & SDHCI_USE_ADMA)
2484 + if (host->flags & SDHCI_USE_ADMA) {
2485 mmc->max_segs = SDHCI_MAX_SEGS;
2486 - else if (host->flags & SDHCI_USE_SDMA)
2487 + } else if (host->flags & SDHCI_USE_SDMA) {
2488 mmc->max_segs = 1;
2489 - else /* PIO */
2490 + if (swiotlb_max_segment()) {
2491 + unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
2492 + IO_TLB_SEGSIZE;
2493 + mmc->max_req_size = min(mmc->max_req_size,
2494 + max_req_size);
2495 + }
2496 + } else { /* PIO */
2497 mmc->max_segs = SDHCI_MAX_SEGS;
2498 -
2499 - /*
2500 - * Maximum number of sectors in one transfer. Limited by SDMA boundary
2501 - * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
2502 - * is less anyway.
2503 - */
2504 - mmc->max_req_size = 524288;
2505 + }
2506
2507 /*
2508 * Maximum segment size. Could be one segment with the maximum number
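For the SDMA clamp above, assuming the usual swiotlb geometry (IO_TLB_SHIFT = 11, IO_TLB_SEGSIZE = 128), one bounce-buffer segment is 2 KiB * 128 = 256 KiB, so a swiotlb-backed SDMA host ends up with the smaller of that and the 512 KiB SDMA boundary. A standalone recomputation:

#include <stdio.h>

#define IO_TLB_SHIFT	11
#define IO_TLB_SEGSIZE	128

int main(void)
{
	unsigned int max_req_size = 524288;	/* SDMA boundary size */
	unsigned int swiotlb_max = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

	if (swiotlb_max < max_req_size)
		max_req_size = swiotlb_max;
	printf("max_req_size = %u\n", max_req_size);	/* 262144 */
	return 0;
}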
2509 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
2510 index 67163ca898ba..00a36df02a3f 100644
2511 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
2512 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
2513 @@ -113,7 +113,8 @@
2514 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
2515 #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
2516 #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
2517 -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
2518 +#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
2519 +#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
2520 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
2521
2522 #define E1000_ICH_RAR_ENTRIES 7
2523 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2524 index c38b00c90f48..991c2a0dd67e 100644
2525 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
2526 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2527 @@ -3030,9 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2528 ew32(IOSFPC, reg_val);
2529
2530 reg_val = er32(TARC(0));
2531 - /* SPT and KBL Si errata workaround to avoid Tx hang */
2532 - reg_val &= ~BIT(28);
2533 - reg_val |= BIT(29);
2534 + /* SPT and KBL Si errata workaround to avoid Tx hang.
2535 + * Dropping the number of outstanding requests from
2536 + * 3 to 2 in order to avoid a buffer overrun.
2537 + */
2538 + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
2539 + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
2540 ew32(TARC(0), reg_val);
2541 }
2542 }
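The named masks above perform the same register update as the old BIT(28)/BIT(29) pair while documenting the errata: clear the three-outstanding-requests encoding in bits 29:28, then select the two-request encoding in bit 29. Recomputing the end state standalone (the starting TARC(0) value is made up):

#include <stdint.h>
#include <stdio.h>

#define E1000_TARC0_CB_MULTIQ_3_REQ	0x30000000
#define E1000_TARC0_CB_MULTIQ_2_REQ	0x20000000

int main(void)
{
	uint32_t reg_val = 0x10000000;	/* illustrative: bit 28 set */

	reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
	reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
	printf("TARC0 = 0x%08x\n", reg_val);	/* 0x20000000 */
	return 0;
}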
2543 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2544 index d3f3c4447515..044af553204c 100644
2545 --- a/drivers/nvme/host/nvme.h
2546 +++ b/drivers/nvme/host/nvme.h
2547 @@ -108,7 +108,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
2548 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
2549 * found empirically.
2550 */
2551 -#define NVME_QUIRK_DELAY_AMOUNT 2000
2552 +#define NVME_QUIRK_DELAY_AMOUNT 2300
2553
2554 enum nvme_ctrl_state {
2555 NVME_CTRL_NEW,
2556 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2557 index 3f5a04c586ce..75539f7c58b9 100644
2558 --- a/drivers/nvme/host/pci.c
2559 +++ b/drivers/nvme/host/pci.c
2560 @@ -2519,6 +2519,8 @@ static const struct pci_device_id nvme_id_table[] = {
2561 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2562 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
2563 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2564 + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
2565 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2566 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
2567 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2568 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
2569 diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
2570 index b4ed3dc983d5..b4224389febe 100644
2571 --- a/drivers/platform/x86/hp-wmi.c
2572 +++ b/drivers/platform/x86/hp-wmi.c
2573 @@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
2574 if (state < 0)
2575 return state;
2576
2577 - return state & 0x1;
2578 + return !!(state & mask);
2579 }
2580
2581 static int __init hp_wmi_bios_2008_later(void)
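The hp-wmi fix above makes hp_wmi_hw_state() test the bit the caller asked about rather than always bit 0, with !! normalizing the result to 0/1. A minimal illustration using made-up state and mask values:

#include <stdio.h>

static int hw_state(int state, int mask)
{
	return !!(state & mask);
}

int main(void)
{
	printf("%d %d\n", hw_state(0x4, 0x4), hw_state(0x4, 0x1));
	/* prints "1 0"; the old "state & 0x1" returned 0 for both */
	return 0;
}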
2582 diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
2583 index d79ced925861..82e8f6edfb48 100644
2584 --- a/fs/autofs4/root.c
2585 +++ b/fs/autofs4/root.c
2586 @@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
2587 pr_debug("waiting for mount name=%pd\n", path->dentry);
2588 status = autofs4_wait(sbi, path, NFY_MOUNT);
2589 pr_debug("mount wait done status=%d\n", status);
2590 - ino->last_used = jiffies;
2591 }
2592 + ino->last_used = jiffies;
2593 return status;
2594 }
2595
2596 @@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
2597 */
2598 if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
2599 struct dentry *parent = dentry->d_parent;
2600 + struct autofs_info *ino;
2601 struct dentry *new;
2602
2603 new = d_lookup(parent, &dentry->d_name);
2604 if (!new)
2605 return NULL;
2606 - if (new == dentry)
2607 - dput(new);
2608 - else {
2609 - struct autofs_info *ino;
2610 -
2611 - ino = autofs4_dentry_ino(new);
2612 - ino->last_used = jiffies;
2613 - dput(path->dentry);
2614 - path->dentry = new;
2615 - }
2616 + ino = autofs4_dentry_ino(new);
2617 + ino->last_used = jiffies;
2618 + dput(path->dentry);
2619 + path->dentry = new;
2620 }
2621 return path->dentry;
2622 }
2623 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2624 index 08698105fa4a..e4774c02d922 100644
2625 --- a/fs/btrfs/extent-tree.c
2626 +++ b/fs/btrfs/extent-tree.c
2627 @@ -3526,13 +3526,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2628 goto again;
2629 }
2630
2631 - /* We've already setup this transaction, go ahead and exit */
2632 - if (block_group->cache_generation == trans->transid &&
2633 - i_size_read(inode)) {
2634 - dcs = BTRFS_DC_SETUP;
2635 - goto out_put;
2636 - }
2637 -
2638 /*
2639 * We want to set the generation to 0, that way if anything goes wrong
2640 * from here on out we know not to trust this cache when we load up next
2641 @@ -3556,6 +3549,13 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2642 }
2643 WARN_ON(ret);
2644
2645 + /* We've already setup this transaction, go ahead and exit */
2646 + if (block_group->cache_generation == trans->transid &&
2647 + i_size_read(inode)) {
2648 + dcs = BTRFS_DC_SETUP;
2649 + goto out_put;
2650 + }
2651 +
2652 if (i_size_read(inode) > 0) {
2653 ret = btrfs_check_trunc_cache_free_space(fs_info,
2654 &fs_info->global_block_rsv);
2655 diff --git a/fs/exec.c b/fs/exec.c
2656 index 3e14ba25f678..4726c777dd38 100644
2657 --- a/fs/exec.c
2658 +++ b/fs/exec.c
2659 @@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
2660 * avoid bad behavior from the prior rlimits. This has to
2661 * happen before arch_pick_mmap_layout(), which examines
2662 * RLIMIT_STACK, but after the point of no return to avoid
2663 - * needing to clean up the change on failure.
2664 + * races from other threads changing the limits. This also
2665 + * must be protected from races with prlimit() calls.
2666 */
2667 + task_lock(current->group_leader);
2668 if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
2669 current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
2670 + if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
2671 + current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
2672 + task_unlock(current->group_leader);
2673 }
2674
2675 arch_pick_mmap_layout(current->mm);
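The setup_new_exec() hunk above now clamps both the soft and the hard RLIMIT_STACK value, and does it under task_lock() so a racing prlimit() cannot slip a larger limit in between check and store. A userspace sketch of just the clamp, with the locking elided and _STK_LIM taken as the kernel's 8 MiB:

#include <stdio.h>

#define STK_LIM (8UL * 1024 * 1024)

struct rlim { unsigned long cur, max; };

static void clamp_stack(struct rlim *r)
{
	/* in the kernel this runs with task_lock(group_leader) held */
	if (r->cur > STK_LIM)
		r->cur = STK_LIM;
	if (r->max > STK_LIM)
		r->max = STK_LIM;
}

int main(void)
{
	struct rlim r = { 16UL << 20, ~0UL };	/* oversized soft, unlimited hard */

	clamp_stack(&r);
	printf("cur=%lu max=%lu\n", r.cur, r.max);	/* both 8388608 */
	return 0;
}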
2676 diff --git a/fs/fat/inode.c b/fs/fat/inode.c
2677 index 30c52394a7ad..c7a4dee206b9 100644
2678 --- a/fs/fat/inode.c
2679 +++ b/fs/fat/inode.c
2680 @@ -779,7 +779,7 @@ static void __exit fat_destroy_inodecache(void)
2681
2682 static int fat_remount(struct super_block *sb, int *flags, char *data)
2683 {
2684 - int new_rdonly;
2685 + bool new_rdonly;
2686 struct msdos_sb_info *sbi = MSDOS_SB(sb);
2687 *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
2688
2689 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
2690 index f04ecfc7ece0..45e96549ebd2 100644
2691 --- a/fs/lockd/svc.c
2692 +++ b/fs/lockd/svc.c
2693 @@ -274,6 +274,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
2694 if (ln->nlmsvc_users) {
2695 if (--ln->nlmsvc_users == 0) {
2696 nlm_shutdown_hosts_net(net);
2697 + cancel_delayed_work_sync(&ln->grace_period_end);
2698 + locks_end_grace(&ln->lockd_manager);
2699 svc_shutdown_net(serv, net);
2700 dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
2701 }
2702 diff --git a/fs/namei.c b/fs/namei.c
2703 index ed8b9488a890..62a0db6e6725 100644
2704 --- a/fs/namei.c
2705 +++ b/fs/namei.c
2706 @@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
2707 * of the daemon to instantiate them before they can be used.
2708 */
2709 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
2710 - LOOKUP_OPEN | LOOKUP_CREATE |
2711 - LOOKUP_AUTOMOUNT))) {
2712 - /* Positive dentry that isn't meant to trigger an
2713 - * automount, EISDIR will allow it to be used,
2714 - * otherwise there's no mount here "now" so return
2715 - * ENOENT.
2716 - */
2717 - if (path->dentry->d_inode)
2718 - return -EISDIR;
2719 - else
2720 - return -ENOENT;
2721 - }
2722 + LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
2723 + path->dentry->d_inode)
2724 + return -EISDIR;
2725
2726 if (path->dentry->d_sb->s_user_ns != &init_user_ns)
2727 return -EACCES;
2728 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2729 index d386d569edbc..a439a70177a4 100644
2730 --- a/fs/nfsd/nfs4state.c
2731 +++ b/fs/nfsd/nfs4state.c
2732 @@ -3512,7 +3512,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2733 /* ignore lock owners */
2734 if (local->st_stateowner->so_is_open_owner == 0)
2735 continue;
2736 - if (local->st_stateowner == &oo->oo_owner) {
2737 + if (local->st_stateowner != &oo->oo_owner)
2738 + continue;
2739 + if (local->st_stid.sc_type == NFS4_OPEN_STID) {
2740 ret = local;
2741 atomic_inc(&ret->st_stid.sc_count);
2742 break;
2743 @@ -3521,6 +3523,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2744 return ret;
2745 }
2746
2747 +static __be32
2748 +nfsd4_verify_open_stid(struct nfs4_stid *s)
2749 +{
2750 + __be32 ret = nfs_ok;
2751 +
2752 + switch (s->sc_type) {
2753 + default:
2754 + break;
2755 + case NFS4_CLOSED_STID:
2756 + case NFS4_CLOSED_DELEG_STID:
2757 + ret = nfserr_bad_stateid;
2758 + break;
2759 + case NFS4_REVOKED_DELEG_STID:
2760 + ret = nfserr_deleg_revoked;
2761 + }
2762 + return ret;
2763 +}
2764 +
2765 +/* Lock the stateid st_mutex, and deal with races with CLOSE */
2766 +static __be32
2767 +nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
2768 +{
2769 + __be32 ret;
2770 +
2771 + mutex_lock(&stp->st_mutex);
2772 + ret = nfsd4_verify_open_stid(&stp->st_stid);
2773 + if (ret != nfs_ok)
2774 + mutex_unlock(&stp->st_mutex);
2775 + return ret;
2776 +}
2777 +
2778 +static struct nfs4_ol_stateid *
2779 +nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
2780 +{
2781 + struct nfs4_ol_stateid *stp;
2782 + for (;;) {
2783 + spin_lock(&fp->fi_lock);
2784 + stp = nfsd4_find_existing_open(fp, open);
2785 + spin_unlock(&fp->fi_lock);
2786 + if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
2787 + break;
2788 + nfs4_put_stid(&stp->st_stid);
2789 + }
2790 + return stp;
2791 +}
2792 +
2793 static struct nfs4_openowner *
2794 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
2795 struct nfsd4_compound_state *cstate)
2796 @@ -3565,6 +3613,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
2797 mutex_init(&stp->st_mutex);
2798 mutex_lock(&stp->st_mutex);
2799
2800 +retry:
2801 spin_lock(&oo->oo_owner.so_client->cl_lock);
2802 spin_lock(&fp->fi_lock);
2803
2804 @@ -3589,7 +3638,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
2805 spin_unlock(&fp->fi_lock);
2806 spin_unlock(&oo->oo_owner.so_client->cl_lock);
2807 if (retstp) {
2808 - mutex_lock(&retstp->st_mutex);
2809 + /* Handle races with CLOSE */
2810 + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
2811 + nfs4_put_stid(&retstp->st_stid);
2812 + goto retry;
2813 + }
2814 /* To keep mutex tracking happy */
2815 mutex_unlock(&stp->st_mutex);
2816 stp = retstp;
2817 @@ -4399,6 +4452,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2818 struct nfs4_ol_stateid *stp = NULL;
2819 struct nfs4_delegation *dp = NULL;
2820 __be32 status;
2821 + bool new_stp = false;
2822
2823 /*
2824 * Lookup file; if found, lookup stateid and check open request,
2825 @@ -4410,9 +4464,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2826 status = nfs4_check_deleg(cl, open, &dp);
2827 if (status)
2828 goto out;
2829 - spin_lock(&fp->fi_lock);
2830 - stp = nfsd4_find_existing_open(fp, open);
2831 - spin_unlock(&fp->fi_lock);
2832 + stp = nfsd4_find_and_lock_existing_open(fp, open);
2833 } else {
2834 open->op_file = NULL;
2835 status = nfserr_bad_stateid;
2836 @@ -4420,35 +4472,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2837 goto out;
2838 }
2839
2840 + if (!stp) {
2841 + stp = init_open_stateid(fp, open);
2842 + if (!open->op_stp)
2843 + new_stp = true;
2844 + }
2845 +
2846 /*
2847 * OPEN the file, or upgrade an existing OPEN.
2848 * If truncate fails, the OPEN fails.
2849 + *
2850 + * stp is already locked.
2851 */
2852 - if (stp) {
2853 + if (!new_stp) {
2854 /* Stateid was found, this is an OPEN upgrade */
2855 - mutex_lock(&stp->st_mutex);
2856 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2857 if (status) {
2858 mutex_unlock(&stp->st_mutex);
2859 goto out;
2860 }
2861 } else {
2862 - /* stp is returned locked. */
2863 - stp = init_open_stateid(fp, open);
2864 - /* See if we lost the race to some other thread */
2865 - if (stp->st_access_bmap != 0) {
2866 - status = nfs4_upgrade_open(rqstp, fp, current_fh,
2867 - stp, open);
2868 - if (status) {
2869 - mutex_unlock(&stp->st_mutex);
2870 - goto out;
2871 - }
2872 - goto upgrade_out;
2873 - }
2874 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
2875 if (status) {
2876 - mutex_unlock(&stp->st_mutex);
2877 + stp->st_stid.sc_type = NFS4_CLOSED_STID;
2878 release_open_stateid(stp);
2879 + mutex_unlock(&stp->st_mutex);
2880 goto out;
2881 }
2882
2883 @@ -4457,7 +4505,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2884 if (stp->st_clnt_odstate == open->op_odstate)
2885 open->op_odstate = NULL;
2886 }
2887 -upgrade_out:
2888 +
2889 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
2890 mutex_unlock(&stp->st_mutex);
2891
2892 @@ -4684,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn)
2893 spin_unlock(&nn->blocked_locks_lock);
2894
2895 while (!list_empty(&reaplist)) {
2896 - nbl = list_first_entry(&nn->blocked_locks_lru,
2897 + nbl = list_first_entry(&reaplist,
2898 struct nfsd4_blocked_lock, nbl_lru);
2899 list_del_init(&nbl->nbl_lru);
2900 posix_unblock_lock(&nbl->nbl_lock);
2901 @@ -5317,7 +5365,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
2902 bool unhashed;
2903 LIST_HEAD(reaplist);
2904
2905 - s->st_stid.sc_type = NFS4_CLOSED_STID;
2906 spin_lock(&clp->cl_lock);
2907 unhashed = unhash_open_stateid(s, &reaplist);
2908
2909 @@ -5357,10 +5404,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2910 nfsd4_bump_seqid(cstate, status);
2911 if (status)
2912 goto out;
2913 +
2914 + stp->st_stid.sc_type = NFS4_CLOSED_STID;
2915 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
2916 - mutex_unlock(&stp->st_mutex);
2917
2918 nfsd4_close_open_stateid(stp);
2919 + mutex_unlock(&stp->st_mutex);
2920
2921 /* put reference from nfs4_preprocess_seqid_op */
2922 nfs4_put_stid(&stp->st_stid);
2923 @@ -7103,7 +7152,7 @@ nfs4_state_shutdown_net(struct net *net)
2924 spin_unlock(&nn->blocked_locks_lock);
2925
2926 while (!list_empty(&reaplist)) {
2927 - nbl = list_first_entry(&nn->blocked_locks_lru,
2928 + nbl = list_first_entry(&reaplist,
2929 struct nfsd4_blocked_lock, nbl_lru);
2930 list_del_init(&nbl->nbl_lru);
2931 posix_unblock_lock(&nbl->nbl_lock);
2932 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
2933 index fa1505292f6c..324a04df3785 100644
2934 --- a/include/acpi/acpi_bus.h
2935 +++ b/include/acpi/acpi_bus.h
2936 @@ -105,6 +105,7 @@ enum acpi_bus_device_type {
2937 ACPI_BUS_TYPE_THERMAL,
2938 ACPI_BUS_TYPE_POWER_BUTTON,
2939 ACPI_BUS_TYPE_SLEEP_BUTTON,
2940 + ACPI_BUS_TYPE_ECDT_EC,
2941 ACPI_BUS_DEVICE_TYPE_COUNT
2942 };
2943
2944 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
2945 index 29c691265b49..14499757338f 100644
2946 --- a/include/acpi/acpi_drivers.h
2947 +++ b/include/acpi/acpi_drivers.h
2948 @@ -58,6 +58,7 @@
2949 #define ACPI_VIDEO_HID "LNXVIDEO"
2950 #define ACPI_BAY_HID "LNXIOBAY"
2951 #define ACPI_DOCK_HID "LNXDOCK"
2952 +#define ACPI_ECDT_HID "LNXEC"
2953 /* Quirk for broken IBM BIOSes */
2954 #define ACPI_SMBUS_IBM_HID "SMBUSIBM"
2955
2956 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
2957 index 757dc6ffc7ba..1ac457511f4e 100644
2958 --- a/include/asm-generic/pgtable.h
2959 +++ b/include/asm-generic/pgtable.h
2960 @@ -814,6 +814,14 @@ static inline int pmd_write(pmd_t pmd)
2961 #endif /* __HAVE_ARCH_PMD_WRITE */
2962 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2963
2964 +#ifndef pud_write
2965 +static inline int pud_write(pud_t pud)
2966 +{
2967 + BUG();
2968 + return 0;
2969 +}
2970 +#endif /* pud_write */
2971 +
2972 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
2973 (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
2974 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
2975 diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
2976 index 75ec9c662268..aeec003a566b 100644
2977 --- a/include/crypto/if_alg.h
2978 +++ b/include/crypto/if_alg.h
2979 @@ -255,6 +255,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
2980 unsigned int ivsize);
2981 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
2982 int offset, size_t size, int flags);
2983 +void af_alg_free_resources(struct af_alg_async_req *areq);
2984 void af_alg_async_cb(struct crypto_async_request *_req, int err);
2985 unsigned int af_alg_poll(struct file *file, struct socket *sock,
2986 poll_table *wait);
2987 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
2988 index 1e1908a6b1d6..a992434ded99 100644
2989 --- a/include/drm/drm_edid.h
2990 +++ b/include/drm/drm_edid.h
2991 @@ -360,7 +360,8 @@ void
2992 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
2993 const struct drm_display_mode *mode,
2994 enum hdmi_quantization_range rgb_quant_range,
2995 - bool rgb_quant_range_selectable);
2996 + bool rgb_quant_range_selectable,
2997 + bool is_hdmi2_sink);
2998
2999 /**
3000 * drm_eld_mnl - Get ELD monitor name length in bytes.
3001 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
3002 index 54dfef70a072..780b1242bf24 100644
3003 --- a/include/linux/compiler-clang.h
3004 +++ b/include/linux/compiler-clang.h
3005 @@ -16,3 +16,6 @@
3006 * with any version that can compile the kernel
3007 */
3008 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
3009 +
3010 +#define randomized_struct_fields_start struct {
3011 +#define randomized_struct_fields_end };
3012 diff --git a/include/linux/fs.h b/include/linux/fs.h
3013 index 885266aae2d7..440281f8564d 100644
3014 --- a/include/linux/fs.h
3015 +++ b/include/linux/fs.h
3016 @@ -3069,7 +3069,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
3017 static inline int vfs_fstatat(int dfd, const char __user *filename,
3018 struct kstat *stat, int flags)
3019 {
3020 - return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
3021 + return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
3022 + stat, STATX_BASIC_STATS);
3023 }
3024 static inline int vfs_fstat(int fd, struct kstat *stat)
3025 {
3026 @@ -3175,6 +3176,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
3027 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
3028 }
3029
3030 +static inline bool vma_is_fsdax(struct vm_area_struct *vma)
3031 +{
3032 + struct inode *inode;
3033 +
3034 + if (!vma->vm_file)
3035 + return false;
3036 + if (!vma_is_dax(vma))
3037 + return false;
3038 + inode = file_inode(vma->vm_file);
3039 + if (inode->i_mode == S_IFCHR)
3040 + return false; /* device-dax */
3041 + return true;
3042 +}
3043 +
3044 static inline int iocb_flags(struct file *file)
3045 {
3046 int res = 0;
3047 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3048 index fbf5b31d47ee..82a25880714a 100644
3049 --- a/include/linux/hugetlb.h
3050 +++ b/include/linux/hugetlb.h
3051 @@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
3052 }
3053 #endif
3054
3055 -#ifndef pud_write
3056 -static inline int pud_write(pud_t pud)
3057 -{
3058 - BUG();
3059 - return 0;
3060 -}
3061 -#endif
3062 -
3063 #define HUGETLB_ANON_FILE "anon_hugepage"
3064
3065 enum {
3066 diff --git a/include/linux/migrate.h b/include/linux/migrate.h
3067 index 895ec0c4942e..a2246cf670ba 100644
3068 --- a/include/linux/migrate.h
3069 +++ b/include/linux/migrate.h
3070 @@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
3071 new_page = __alloc_pages_nodemask(gfp_mask, order,
3072 preferred_nid, nodemask);
3073
3074 - if (new_page && PageTransHuge(page))
3075 + if (new_page && PageTransHuge(new_page))
3076 prep_transhuge_page(new_page);
3077
3078 return new_page;
3079 diff --git a/include/linux/mm.h b/include/linux/mm.h
3080 index 43edf659453b..db647d428100 100644
3081 --- a/include/linux/mm.h
3082 +++ b/include/linux/mm.h
3083 @@ -367,6 +367,7 @@ enum page_entry_size {
3084 struct vm_operations_struct {
3085 void (*open)(struct vm_area_struct * area);
3086 void (*close)(struct vm_area_struct * area);
3087 + int (*split)(struct vm_area_struct * area, unsigned long addr);
3088 int (*mremap)(struct vm_area_struct * area);
3089 int (*fault)(struct vm_fault *vmf);
3090 int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
3091 @@ -1367,6 +1368,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
3092 unsigned int gup_flags, struct page **pages, int *locked);
3093 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3094 struct page **pages, unsigned int gup_flags);
3095 +#ifdef CONFIG_FS_DAX
3096 +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
3097 + unsigned int gup_flags, struct page **pages,
3098 + struct vm_area_struct **vmas);
3099 +#else
3100 +static inline long get_user_pages_longterm(unsigned long start,
3101 + unsigned long nr_pages, unsigned int gup_flags,
3102 + struct page **pages, struct vm_area_struct **vmas)
3103 +{
3104 + return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
3105 +}
3106 +#endif /* CONFIG_FS_DAX */
3107 +
3108 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
3109 struct page **pages);
3110
3111 diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
3112 index 90fc490f973f..821f71a2e48f 100644
3113 --- a/include/uapi/linux/bcache.h
3114 +++ b/include/uapi/linux/bcache.h
3115 @@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
3116
3117 #define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
3118
3119 -#define PTR(gen, offset, dev) \
3120 +#define MAKE_PTR(gen, offset, dev) \
3121 ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
3122
3123 /* Bkey utility code */
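The bcache change renames PTR() to MAKE_PTR(), reportedly to avoid a clash with an identically named arch macro; the bit packing is unchanged. A sketch with illustrative values:

	/* MAKE_PTR packs dev into bits 63:51, offset into bits 50:8 and
	 * gen into bits 7:0; all three values here are illustrative. */
	__u64 p = MAKE_PTR(/* gen */ 0x2a, /* offset */ 0x1000, /* dev */ 3);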
3124 diff --git a/mm/frame_vector.c b/mm/frame_vector.c
3125 index 2f98df0d460e..297c7238f7d4 100644
3126 --- a/mm/frame_vector.c
3127 +++ b/mm/frame_vector.c
3128 @@ -53,6 +53,18 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
3129 ret = -EFAULT;
3130 goto out;
3131 }
3132 +
3133 + /*
3134 + * While get_vaddr_frames() could be used for transient (kernel
3135 + * controlled lifetime) pinning of memory pages all current
3136 + * users establish long term (userspace controlled lifetime)
3137 + * page pinning. Treat get_vaddr_frames() like
3138 + * get_user_pages_longterm() and disallow it for filesystem-dax
3139 + * mappings.
3140 + */
3141 + if (vma_is_fsdax(vma))
3142 + return -EOPNOTSUPP;
3143 +
3144 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
3145 vec->got_ref = true;
3146 vec->is_pfns = false;
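For context, get_vaddr_frames() callers hold the resulting pins for a userspace-controlled lifetime, which is why fs-dax is refused above. A sketch of the usual caller pattern (V4L2-style); start and nr_frames are hypothetical:

	struct frame_vector *vec;
	int ret;

	vec = frame_vector_create(nr_frames);
	if (!vec)
		return -ENOMEM;
	ret = get_vaddr_frames(start, nr_frames, FOLL_WRITE | FOLL_FORCE, vec);
	if (ret < 0) {		/* now includes -EOPNOTSUPP for fs-dax VMAs */
		frame_vector_destroy(vec);
		return ret;
	}
	/* ... use frame_vector_pages(vec) while the pin is held ... */
	put_vaddr_frames(vec);
	frame_vector_destroy(vec);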
3147 diff --git a/mm/gup.c b/mm/gup.c
3148 index b2b4d4263768..165ba2174c75 100644
3149 --- a/mm/gup.c
3150 +++ b/mm/gup.c
3151 @@ -1095,6 +1095,70 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
3152 }
3153 EXPORT_SYMBOL(get_user_pages);
3154
3155 +#ifdef CONFIG_FS_DAX
3156 +/*
3157 + * This is the same as get_user_pages() in that it assumes we are
3158 + * operating on the current task's mm, but it goes further to validate
3159 + * that the vmas associated with the address range are suitable for
3160 + * longterm elevated page reference counts. For example, filesystem-dax
3161 + * mappings are subject to the lifetime enforced by the filesystem and
3162 + * we need guarantees that longterm users like RDMA and V4L2 only
3163 + * establish mappings that have a kernel enforced revocation mechanism.
3164 + *
3165 + * "longterm" == userspace controlled elevated page count lifetime.
3166 + * Contrast this to iov_iter_get_pages() usages which are transient.
3167 + */
3168 +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
3169 + unsigned int gup_flags, struct page **pages,
3170 + struct vm_area_struct **vmas_arg)
3171 +{
3172 + struct vm_area_struct **vmas = vmas_arg;
3173 + struct vm_area_struct *vma_prev = NULL;
3174 + long rc, i;
3175 +
3176 + if (!pages)
3177 + return -EINVAL;
3178 +
3179 + if (!vmas) {
3180 + vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
3181 + GFP_KERNEL);
3182 + if (!vmas)
3183 + return -ENOMEM;
3184 + }
3185 +
3186 + rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
3187 +
3188 + for (i = 0; i < rc; i++) {
3189 + struct vm_area_struct *vma = vmas[i];
3190 +
3191 + if (vma == vma_prev)
3192 + continue;
3193 +
3194 + vma_prev = vma;
3195 +
3196 + if (vma_is_fsdax(vma))
3197 + break;
3198 + }
3199 +
3200 + /*
3201 + * Either get_user_pages() failed, or the vma validation
3202 + * succeeded; in either case we don't need to put_page() before
3203 + * returning.
3204 + */
3205 + if (i >= rc)
3206 + goto out;
3207 +
3208 + for (i = 0; i < rc; i++)
3209 + put_page(pages[i]);
3210 + rc = -EOPNOTSUPP;
3211 +out:
3212 + if (vmas != vmas_arg)
3213 + kfree(vmas);
3214 + return rc;
3215 +}
3216 +EXPORT_SYMBOL(get_user_pages_longterm);
3217 +#endif /* CONFIG_FS_DAX */
3218 +
3219 /**
3220 * populate_vma_page_range() - populate a range of pages in the vma.
3221 * @vma: target vma
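A caller-side sketch for the new export; uaddr, npages and pages[] are hypothetical driver state, and as with get_user_pages() the call is made with the task's mmap_sem held for read:

	/* Sketch: a userspace-controlled, long-lived pin (RDMA/V4L2 style).
	 * Passing NULL for vmas lets the helper allocate its own array. */
	long n = get_user_pages_longterm(uaddr, npages, FOLL_WRITE, pages, NULL);
	if (n == -EOPNOTSUPP) {
		/* the range maps fs-dax; no revocation mechanism, so refuse */
		return -EOPNOTSUPP;
	}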
3222 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3223 index 1981ed697dab..eba34cdfc3e5 100644
3224 --- a/mm/huge_memory.c
3225 +++ b/mm/huge_memory.c
3226 @@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
3227 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
3228
3229 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
3230 - pmd_t *pmd)
3231 + pmd_t *pmd, int flags)
3232 {
3233 pmd_t _pmd;
3234
3235 - /*
3236 - * We should set the dirty bit only for FOLL_WRITE but for now
3237 - * the dirty bit in the pmd is meaningless. And if the dirty
3238 - * bit will become meaningful and we'll only set it with
3239 - * FOLL_WRITE, an atomic set_bit will be required on the pmd to
3240 - * set the young bit, instead of the current set_pmd_at.
3241 - */
3242 - _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
3243 + _pmd = pmd_mkyoung(*pmd);
3244 + if (flags & FOLL_WRITE)
3245 + _pmd = pmd_mkdirty(_pmd);
3246 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
3247 - pmd, _pmd, 1))
3248 + pmd, _pmd, flags & FOLL_WRITE))
3249 update_mmu_cache_pmd(vma, addr, pmd);
3250 }
3251
3252 @@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
3253 return NULL;
3254
3255 if (flags & FOLL_TOUCH)
3256 - touch_pmd(vma, addr, pmd);
3257 + touch_pmd(vma, addr, pmd, flags);
3258
3259 /*
3260 * device mapped pages can only be returned if the
3261 @@ -995,20 +990,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
3262
3263 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
3264 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
3265 - pud_t *pud)
3266 + pud_t *pud, int flags)
3267 {
3268 pud_t _pud;
3269
3270 - /*
3271 - * We should set the dirty bit only for FOLL_WRITE but for now
3272 - * the dirty bit in the pud is meaningless. And if the dirty
3273 - * bit will become meaningful and we'll only set it with
3274 - * FOLL_WRITE, an atomic set_bit will be required on the pud to
3275 - * set the young bit, instead of the current set_pud_at.
3276 - */
3277 - _pud = pud_mkyoung(pud_mkdirty(*pud));
3278 + _pud = pud_mkyoung(*pud);
3279 + if (flags & FOLL_WRITE)
3280 + _pud = pud_mkdirty(_pud);
3281 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
3282 - pud, _pud, 1))
3283 + pud, _pud, flags & FOLL_WRITE))
3284 update_mmu_cache_pud(vma, addr, pud);
3285 }
3286
3287 @@ -1031,7 +1021,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
3288 return NULL;
3289
3290 if (flags & FOLL_TOUCH)
3291 - touch_pud(vma, addr, pud);
3292 + touch_pud(vma, addr, pud, flags);
3293
3294 /*
3295 * device mapped pages can only be returned if the
3296 @@ -1407,7 +1397,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
3297 page = pmd_page(*pmd);
3298 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
3299 if (flags & FOLL_TOUCH)
3300 - touch_pmd(vma, addr, pmd);
3301 + touch_pmd(vma, addr, pmd, flags);
3302 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
3303 /*
3304 * We don't mlock() pte-mapped THPs. This way we can avoid
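With the touch_pmd()/touch_pud() changes above, a FOLL_TOUCH lookup now marks the huge entry accessed unconditionally but dirties it only for writers. Illustrative calls (touch_pmd() is static to mm/huge_memory.c, so this is a semantics sketch, not a usable API):

	touch_pmd(vma, addr, pmd, FOLL_TOUCH);			/* young only */
	touch_pmd(vma, addr, pmd, FOLL_TOUCH | FOLL_WRITE);	/* young + dirty */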
3305 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3306 index 2d2ff5e8bf2b..c539941671b4 100644
3307 --- a/mm/hugetlb.c
3308 +++ b/mm/hugetlb.c
3309 @@ -3125,6 +3125,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3310 }
3311 }
3312
3313 +static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3314 +{
3315 + if (addr & ~(huge_page_mask(hstate_vma(vma))))
3316 + return -EINVAL;
3317 + return 0;
3318 +}
3319 +
3320 /*
3321 * We cannot handle pagefaults against hugetlb pages at all. They cause
3322 * handle_mm_fault() to try to instantiate regular-sized pages in the
3323 @@ -3141,6 +3148,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
3324 .fault = hugetlb_vm_op_fault,
3325 .open = hugetlb_vm_op_open,
3326 .close = hugetlb_vm_op_close,
3327 + .split = hugetlb_vm_op_split,
3328 };
3329
3330 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3331 @@ -4617,7 +4625,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
3332 pte_t *pte = NULL;
3333
3334 pgd = pgd_offset(mm, addr);
3335 - p4d = p4d_offset(pgd, addr);
3336 + p4d = p4d_alloc(mm, pgd, addr);
3337 + if (!p4d)
3338 + return NULL;
3339 pud = pud_alloc(mm, p4d, addr);
3340 if (pud) {
3341 if (sz == PUD_SIZE) {
3342 diff --git a/mm/madvise.c b/mm/madvise.c
3343 index 375cf32087e4..751e97aa2210 100644
3344 --- a/mm/madvise.c
3345 +++ b/mm/madvise.c
3346 @@ -276,15 +276,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
3347 {
3348 struct file *file = vma->vm_file;
3349
3350 + *prev = vma;
3351 #ifdef CONFIG_SWAP
3352 if (!file) {
3353 - *prev = vma;
3354 force_swapin_readahead(vma, start, end);
3355 return 0;
3356 }
3357
3358 if (shmem_mapping(file->f_mapping)) {
3359 - *prev = vma;
3360 force_shm_swapin_readahead(vma, start, end,
3361 file->f_mapping);
3362 return 0;
3363 @@ -299,7 +298,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
3364 return 0;
3365 }
3366
3367 - *prev = vma;
3368 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3369 if (end > vma->vm_end)
3370 end = vma->vm_end;
3371 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3372 index 661f046ad318..53f7c919b916 100644
3373 --- a/mm/memcontrol.c
3374 +++ b/mm/memcontrol.c
3375 @@ -6044,7 +6044,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
3376 memcg_check_events(memcg, page);
3377
3378 if (!mem_cgroup_is_root(memcg))
3379 - css_put(&memcg->css);
3380 + css_put_many(&memcg->css, nr_entries);
3381 }
3382
3383 /**
3384 diff --git a/mm/mmap.c b/mm/mmap.c
3385 index 680506faceae..476e810cf100 100644
3386 --- a/mm/mmap.c
3387 +++ b/mm/mmap.c
3388 @@ -2540,9 +2540,11 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
3389 struct vm_area_struct *new;
3390 int err;
3391
3392 - if (is_vm_hugetlb_page(vma) && (addr &
3393 - ~(huge_page_mask(hstate_vma(vma)))))
3394 - return -EINVAL;
3395 + if (vma->vm_ops && vma->vm_ops->split) {
3396 + err = vma->vm_ops->split(vma, addr);
3397 + if (err)
3398 + return err;
3399 + }
3400
3401 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
3402 if (!new)
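The mmap.c hunk replaces the open-coded hugetlb special case with the new vm_ops->split hook, so any mapping with alignment constraints can veto a split. A sketch of a hypothetical driver enforcing a 2 MiB granule, mirroring hugetlb_vm_op_split() above; all mydrv_* names are assumptions:

	#include <linux/sizes.h>	/* SZ_2M */

	/* Hypothetical driver: refuse splits not aligned to its granule. */
	static int mydrv_vma_split(struct vm_area_struct *vma, unsigned long addr)
	{
		if (addr & (SZ_2M - 1))
			return -EINVAL;
		return 0;
	}

	static const struct vm_operations_struct mydrv_vm_ops = {
		.split	= mydrv_vma_split,
		/* .fault etc. omitted in this sketch */
	};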
3403 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3404 index dee0f75c3013..18c5b356b505 100644
3405 --- a/mm/oom_kill.c
3406 +++ b/mm/oom_kill.c
3407 @@ -532,7 +532,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
3408 */
3409 set_bit(MMF_UNSTABLE, &mm->flags);
3410
3411 - tlb_gather_mmu(&tlb, mm, 0, -1);
3412 for (vma = mm->mmap ; vma; vma = vma->vm_next) {
3413 if (!can_madv_dontneed_vma(vma))
3414 continue;
3415 @@ -547,11 +546,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
3416 * we do not want to block exit_mmap by keeping mm ref
3417 * count elevated without a good reason.
3418 */
3419 - if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
3420 + if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
3421 + tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
3422 unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
3423 NULL);
3424 + tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
3425 + }
3426 }
3427 - tlb_finish_mmu(&tlb, 0, -1);
3428 pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
3429 task_pid_nr(tsk), tsk->comm,
3430 K(get_mm_counter(mm, MM_ANONPAGES)),
3431 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3432 index 82a6270c9743..d51c2087c498 100644
3433 --- a/mm/page_alloc.c
3434 +++ b/mm/page_alloc.c
3435 @@ -2487,10 +2487,6 @@ void drain_all_pages(struct zone *zone)
3436 if (WARN_ON_ONCE(!mm_percpu_wq))
3437 return;
3438
3439 - /* Workqueues cannot recurse */
3440 - if (current->flags & PF_WQ_WORKER)
3441 - return;
3442 -
3443 /*
3444 * Do not drain if one is already in progress unless it's specific to
3445 * a zone. Such callers are primarily CMA and memory hotplug and need
3446 @@ -7591,11 +7587,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
3447
3448 /*
3449 * In case of -EBUSY, we'd like to know which page causes problem.
3450 - * So, just fall through. We will check it in test_pages_isolated().
3451 + * So, just fall through. test_pages_isolated() has a tracepoint
3452 + * which will report the busy page.
3453 + *
3454 + * It is possible that busy pages could become available before
3455 + * the call to test_pages_isolated(), and the range will actually be
3456 + * allocated. So, if we fall through, be sure to clear ret so that
3457 + * -EBUSY is not accidentally used or returned to the caller.
3458 */
3459 ret = __alloc_contig_migrate_range(&cc, start, end);
3460 if (ret && ret != -EBUSY)
3461 goto done;
3462 + ret = 0;
3463
3464 /*
3465 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
3466 diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
3467 index 620e81169659..4ac095118717 100644
3468 --- a/security/apparmor/include/audit.h
3469 +++ b/security/apparmor/include/audit.h
3470 @@ -121,17 +121,19 @@ struct apparmor_audit_data {
3471 /* these entries require a custom callback fn */
3472 struct {
3473 struct aa_label *peer;
3474 - struct {
3475 - const char *target;
3476 - kuid_t ouid;
3477 - } fs;
3478 + union {
3479 + struct {
3480 + const char *target;
3481 + kuid_t ouid;
3482 + } fs;
3483 + int signal;
3484 + };
3485 };
3486 struct {
3487 struct aa_profile *profile;
3488 const char *ns;
3489 long pos;
3490 } iface;
3491 - int signal;
3492 struct {
3493 int rlim;
3494 unsigned long max;