Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0112-4.14.13-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 49645 bytes
-added up to patches-4.14.79
1 diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
2 index ad41b3813f0a..ea91cb61a602 100644
3 --- a/Documentation/x86/x86_64/mm.txt
4 +++ b/Documentation/x86/x86_64/mm.txt
5 @@ -12,8 +12,9 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
6 ... unused hole ...
7 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
8 ... unused hole ...
9 -fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
10 -fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
11 + vaddr_end for KASLR
12 +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
13 +fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
14 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
15 ... unused hole ...
16 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
17 @@ -37,13 +38,15 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
18 ... unused hole ...
19 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
20 ... unused hole ...
21 -fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
22 + vaddr_end for KASLR
23 +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
24 +... unused hole ...
25 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
26 ... unused hole ...
27 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
28 ... unused hole ...
29 ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
30 -ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space
31 +ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
32 [fixmap start] - ffffffffff5fffff kernel-internal fixmap range
33 ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
34 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
35 @@ -67,9 +70,10 @@ memory window (this size is arbitrary, it can be raised later if needed).
36 The mappings are not part of any other kernel PGD and are only available
37 during EFI runtime calls.
38
39 -The module mapping space size changes based on the CONFIG requirements for the
40 -following fixmap section.
41 -
42 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
43 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
44 Their order is preserved but their base will be offset early at boot time.
45 +
46 +Be very careful vs. KASLR when changing anything here. The KASLR address
47 +range must not overlap with anything except the KASAN shadow area, which is
48 +correct as KASAN disables KASLR.
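Note: the reordering above pairs with the LDT_PGD_ENTRY/CPU_ENTRY_AREA_PGD swap further down in this patch (pgtable_64_types.h). With 4-level paging each top-level entry spans 2^39 bytes, so the bases fall straight out of the negative entry index. A minimal userspace sketch (not part of the patch) that reproduces the addresses in the table:

#include <stdio.h>

int main(void)
{
        const int pgdir_shift = 39;     /* 4-level paging: each PGD entry maps 512 GB */
        const long entry[] = { -4, -3, -2 };
        const char *name[] = { "cpu_entry_area", "LDT remap (PTI)", "ESPFIX" };

        for (int i = 0; i < 3; i++)
                printf("%-16s entry %ld -> 0x%016lx\n", name[i], entry[i],
                       (unsigned long)entry[i] << pgdir_shift);
        /* prints fffffe0000000000, fffffe8000000000, ffffff0000000000 */
        return 0;
}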
49 diff --git a/Makefile b/Makefile
50 index 20f7d4de0f1c..a67c5179052a 100644
51 --- a/Makefile
52 +++ b/Makefile
53 @@ -1,7 +1,7 @@
54 # SPDX-License-Identifier: GPL-2.0
55 VERSION = 4
56 PATCHLEVEL = 14
57 -SUBLEVEL = 12
58 +SUBLEVEL = 13
59 EXTRAVERSION =
60 NAME = Petit Gorille
61
62 diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
63 index f35974ee7264..c9173c02081c 100644
64 --- a/arch/arc/include/asm/uaccess.h
65 +++ b/arch/arc/include/asm/uaccess.h
66 @@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
67 return 0;
68
69 __asm__ __volatile__(
70 + " mov lp_count, %5 \n"
71 " lp 3f \n"
72 "1: ldb.ab %3, [%2, 1] \n"
73 " breq.d %3, 0, 3f \n"
74 @@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
75 " .word 1b, 4b \n"
76 " .previous \n"
77 : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
78 - : "g"(-EFAULT), "l"(count)
79 - : "memory");
80 + : "g"(-EFAULT), "r"(count)
81 + : "lp_count", "lp_start", "lp_end", "memory");
82
83 return res;
84 }
85 diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
86 index dd5a08aaa4da..3eb4bfc1fb36 100644
87 --- a/arch/parisc/include/asm/ldcw.h
88 +++ b/arch/parisc/include/asm/ldcw.h
89 @@ -12,6 +12,7 @@
90 for the semaphore. */
91
92 #define __PA_LDCW_ALIGNMENT 16
93 +#define __PA_LDCW_ALIGN_ORDER 4
94 #define __ldcw_align(a) ({ \
95 unsigned long __ret = (unsigned long) &(a)->lock[0]; \
96 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
97 @@ -29,6 +30,7 @@
98 ldcd). */
99
100 #define __PA_LDCW_ALIGNMENT 4
101 +#define __PA_LDCW_ALIGN_ORDER 2
102 #define __ldcw_align(a) (&(a)->slock)
103 #define __LDCW "ldcw,co"
104
105 diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
106 index f3cecf5117cf..e95207c0565e 100644
107 --- a/arch/parisc/kernel/entry.S
108 +++ b/arch/parisc/kernel/entry.S
109 @@ -35,6 +35,7 @@
110 #include <asm/pgtable.h>
111 #include <asm/signal.h>
112 #include <asm/unistd.h>
113 +#include <asm/ldcw.h>
114 #include <asm/thread_info.h>
115
116 #include <linux/linkage.h>
117 @@ -46,6 +47,14 @@
118 #endif
119
120 .import pa_tlb_lock,data
121 + .macro load_pa_tlb_lock reg
122 +#if __PA_LDCW_ALIGNMENT > 4
123 + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
124 + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
125 +#else
126 + load32 PA(pa_tlb_lock), \reg
127 +#endif
128 + .endm
129
130 /* space_to_prot macro creates a prot id from a space id */
131
132 @@ -457,7 +466,7 @@
133 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
134 #ifdef CONFIG_SMP
135 cmpib,COND(=),n 0,\spc,2f
136 - load32 PA(pa_tlb_lock),\tmp
137 + load_pa_tlb_lock \tmp
138 1: LDCW 0(\tmp),\tmp1
139 cmpib,COND(=) 0,\tmp1,1b
140 nop
141 @@ -480,7 +489,7 @@
142 /* Release pa_tlb_lock lock. */
143 .macro tlb_unlock1 spc,tmp
144 #ifdef CONFIG_SMP
145 - load32 PA(pa_tlb_lock),\tmp
146 + load_pa_tlb_lock \tmp
147 tlb_unlock0 \spc,\tmp
148 #endif
149 .endm
150 diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
151 index adf7187f8951..2d40c4ff3f69 100644
152 --- a/arch/parisc/kernel/pacache.S
153 +++ b/arch/parisc/kernel/pacache.S
154 @@ -36,6 +36,7 @@
155 #include <asm/assembly.h>
156 #include <asm/pgtable.h>
157 #include <asm/cache.h>
158 +#include <asm/ldcw.h>
159 #include <linux/linkage.h>
160
161 .text
162 @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
163
164 .macro tlb_lock la,flags,tmp
165 #ifdef CONFIG_SMP
166 - ldil L%pa_tlb_lock,%r1
167 - ldo R%pa_tlb_lock(%r1),\la
168 +#if __PA_LDCW_ALIGNMENT > 4
169 + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
170 + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
171 +#else
172 + load32 pa_tlb_lock, \la
173 +#endif
174 rsm PSW_SM_I,\flags
175 1: LDCW 0(\la),\tmp
176 cmpib,<>,n 0,\tmp,3f
177 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
178 index 30f92391a93e..cad3e8661cd6 100644
179 --- a/arch/parisc/kernel/process.c
180 +++ b/arch/parisc/kernel/process.c
181 @@ -39,6 +39,7 @@
182 #include <linux/kernel.h>
183 #include <linux/mm.h>
184 #include <linux/fs.h>
185 +#include <linux/cpu.h>
186 #include <linux/module.h>
187 #include <linux/personality.h>
188 #include <linux/ptrace.h>
189 @@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
190 return 1;
191 }
192
193 +/*
194 + * Idle thread support
195 + *
196 + * Detect when running on QEMU with SeaBIOS PDC Firmware and let
197 + * QEMU idle the host too.
198 + */
199 +
200 +int running_on_qemu __read_mostly;
201 +
202 +void __cpuidle arch_cpu_idle_dead(void)
203 +{
204 + /* nop on real hardware, qemu will offline CPU. */
205 + asm volatile("or %%r31,%%r31,%%r31\n":::);
206 +}
207 +
208 +void __cpuidle arch_cpu_idle(void)
209 +{
210 + local_irq_enable();
211 +
212 + /* nop on real hardware, qemu will idle sleep. */
213 + asm volatile("or %%r10,%%r10,%%r10\n":::);
214 +}
215 +
216 +static int __init parisc_idle_init(void)
217 +{
218 + const char *marker;
219 +
220 + /* check QEMU/SeaBIOS marker in PAGE0 */
221 + marker = (char *) &PAGE0->pad0;
222 + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
223 +
224 + if (!running_on_qemu)
225 + cpu_idle_poll_ctrl(1);
226 +
227 + return 0;
228 +}
229 +arch_initcall(parisc_idle_init);
230 +
231 /*
232 * Copy architecture-specific thread state
233 */
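Note: the detection above hinges on a fixed marker that QEMU's SeaBIOS PDC firmware leaves in PAGE0; comparing 8 bytes matches the seven characters of "SeaBIOS" plus the terminating NUL. A standalone illustration of the check (the buffer is a stand-in, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char pad0[8] = "SeaBIOS";       /* stand-in for PAGE0->pad0 on QEMU */
        int running_on_qemu = (memcmp(pad0, "SeaBIOS", 8) == 0);

        printf("running_on_qemu = %d\n", running_on_qemu);      /* 1 */
        return 0;
}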
234 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
235 index 4797d08581ce..6e1e39035380 100644
236 --- a/arch/powerpc/mm/fault.c
237 +++ b/arch/powerpc/mm/fault.c
238 @@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
239 return __bad_area(regs, address, SEGV_MAPERR);
240 }
241
242 +static noinline int bad_access(struct pt_regs *regs, unsigned long address)
243 +{
244 + return __bad_area(regs, address, SEGV_ACCERR);
245 +}
246 +
247 static int do_sigbus(struct pt_regs *regs, unsigned long address,
248 unsigned int fault)
249 {
250 @@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
251
252 good_area:
253 if (unlikely(access_error(is_write, is_exec, vma)))
254 - return bad_area(regs, address);
255 + return bad_access(regs, address);
256
257 /*
258 * If for any reason at all we couldn't handle the fault,
259 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
260 index 40d0a1a97889..b87a930c2201 100644
261 --- a/arch/s390/kvm/kvm-s390.c
262 +++ b/arch/s390/kvm/kvm-s390.c
263 @@ -794,11 +794,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
264
265 if (kvm->arch.use_cmma) {
266 /*
267 - * Get the last slot. They should be sorted by base_gfn, so the
268 - * last slot is also the one at the end of the address space.
269 - * We have verified above that at least one slot is present.
270 + * Get the first slot. They are reverse sorted by base_gfn, so
271 + * the first slot is also the one at the end of the address
272 + * space. We have verified above that at least one slot is
273 + * present.
274 */
275 - ms = slots->memslots + slots->used_slots - 1;
276 + ms = slots->memslots;
277 /* round up so we only use full longs */
278 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
279 /* allocate enough bytes to store all the bits */
280 diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
281 index 5b25287f449b..7bd3a59232f0 100644
282 --- a/arch/s390/kvm/priv.c
283 +++ b/arch/s390/kvm/priv.c
284 @@ -1009,7 +1009,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
285 cbrlo[entries] = gfn << PAGE_SHIFT;
286 }
287
288 - if (orc) {
289 + if (orc && gfn < ms->bitmap_size) {
290 /* increment only if we are really flipping the bit to 1 */
291 if (!test_and_set_bit(gfn, ms->pgste_bitmap))
292 atomic64_inc(&ms->dirty_pages);
293 diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
294 index 8f0aace08b87..8156e47da7ba 100644
295 --- a/arch/x86/events/intel/ds.c
296 +++ b/arch/x86/events/intel/ds.c
297 @@ -5,6 +5,7 @@
298
299 #include <asm/cpu_entry_area.h>
300 #include <asm/perf_event.h>
301 +#include <asm/tlbflush.h>
302 #include <asm/insn.h>
303
304 #include "../perf_event.h"
305 @@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
306
307 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
308 {
309 + unsigned long start = (unsigned long)cea;
310 phys_addr_t pa;
311 size_t msz = 0;
312
313 pa = virt_to_phys(addr);
314 +
315 + preempt_disable();
316 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
317 cea_set_pte(cea, pa, prot);
318 +
319 + /*
320 + * This is a cross-CPU update of the cpu_entry_area, we must shoot down
321 + * all TLB entries for it.
322 + */
323 + flush_tlb_kernel_range(start, start + size);
324 + preempt_enable();
325 }
326
327 static void ds_clear_cea(void *cea, size_t size)
328 {
329 + unsigned long start = (unsigned long)cea;
330 size_t msz = 0;
331
332 + preempt_disable();
333 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
334 cea_set_pte(cea, 0, PAGE_NONE);
335 +
336 + flush_tlb_kernel_range(start, start + size);
337 + preempt_enable();
338 }
339
340 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
341 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
342 index dbfd0854651f..cf5961ca8677 100644
343 --- a/arch/x86/include/asm/alternative.h
344 +++ b/arch/x86/include/asm/alternative.h
345 @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
346 ".popsection\n" \
347 ".pushsection .altinstr_replacement, \"ax\"\n" \
348 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
349 - ".popsection"
350 + ".popsection\n"
351
352 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
353 OLDINSTR_2(oldinstr, 1, 2) \
354 @@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
355 ".pushsection .altinstr_replacement, \"ax\"\n" \
356 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
357 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
358 - ".popsection"
359 + ".popsection\n"
360
361 /*
362 * Alternative instructions for different CPU types or capabilities.
363 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
364 index 07cdd1715705..21ac898df2d8 100644
365 --- a/arch/x86/include/asm/cpufeatures.h
366 +++ b/arch/x86/include/asm/cpufeatures.h
367 @@ -341,6 +341,6 @@
368 #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
369 #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
370 #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
371 -#define X86_BUG_CPU_INSECURE X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
372 +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
373
374 #endif /* _ASM_X86_CPUFEATURES_H */
375 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
376 index b97a539bcdee..6b8f73dcbc2c 100644
377 --- a/arch/x86/include/asm/pgtable_64_types.h
378 +++ b/arch/x86/include/asm/pgtable_64_types.h
379 @@ -75,7 +75,13 @@ typedef struct { pteval_t pte; } pte_t;
380 #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
381 #define PGDIR_MASK (~(PGDIR_SIZE - 1))
382
383 -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
384 +/*
385 + * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
386 + *
387 + * Be very careful vs. KASLR when changing anything here. The KASLR address
388 + * range must not overlap with anything except the KASAN shadow area, which
389 + * is correct as KASAN disables KASLR.
390 + */
391 #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
392
393 #ifdef CONFIG_X86_5LEVEL
394 @@ -88,7 +94,7 @@ typedef struct { pteval_t pte; } pte_t;
395 # define VMALLOC_SIZE_TB _AC(32, UL)
396 # define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
397 # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
398 -# define LDT_PGD_ENTRY _AC(-4, UL)
399 +# define LDT_PGD_ENTRY _AC(-3, UL)
400 # define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
401 #endif
402
403 @@ -104,13 +110,13 @@ typedef struct { pteval_t pte; } pte_t;
404
405 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
406 /* The module sections ends with the start of the fixmap */
407 -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
408 +#define MODULES_END _AC(0xffffffffff000000, UL)
409 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
410
411 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
412 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
413
414 -#define CPU_ENTRY_AREA_PGD _AC(-3, UL)
415 +#define CPU_ENTRY_AREA_PGD _AC(-4, UL)
416 #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
417
418 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
419 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
420 index 90cb82dbba57..570e8bb1f386 100644
421 --- a/arch/x86/kernel/cpu/Makefile
422 +++ b/arch/x86/kernel/cpu/Makefile
423 @@ -22,7 +22,7 @@ obj-y += common.o
424 obj-y += rdrand.o
425 obj-y += match.o
426 obj-y += bugs.o
427 -obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
428 +obj-y += aperfmperf.o
429 obj-y += cpuid-deps.o
430
431 obj-$(CONFIG_PROC_FS) += proc.o
432 diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
433 index 0ee83321a313..7eba34df54c3 100644
434 --- a/arch/x86/kernel/cpu/aperfmperf.c
435 +++ b/arch/x86/kernel/cpu/aperfmperf.c
436 @@ -14,6 +14,8 @@
437 #include <linux/percpu.h>
438 #include <linux/smp.h>
439
440 +#include "cpu.h"
441 +
442 struct aperfmperf_sample {
443 unsigned int khz;
444 ktime_t time;
445 @@ -24,7 +26,7 @@ struct aperfmperf_sample {
446 static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
447
448 #define APERFMPERF_CACHE_THRESHOLD_MS 10
449 -#define APERFMPERF_REFRESH_DELAY_MS 20
450 +#define APERFMPERF_REFRESH_DELAY_MS 10
451 #define APERFMPERF_STALE_THRESHOLD_MS 1000
452
453 /*
454 @@ -38,14 +40,8 @@ static void aperfmperf_snapshot_khz(void *dummy)
455 u64 aperf, aperf_delta;
456 u64 mperf, mperf_delta;
457 struct aperfmperf_sample *s = this_cpu_ptr(&samples);
458 - ktime_t now = ktime_get();
459 - s64 time_delta = ktime_ms_delta(now, s->time);
460 unsigned long flags;
461
462 - /* Don't bother re-computing within the cache threshold time. */
463 - if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
464 - return;
465 -
466 local_irq_save(flags);
467 rdmsrl(MSR_IA32_APERF, aperf);
468 rdmsrl(MSR_IA32_MPERF, mperf);
469 @@ -61,31 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy)
470 if (mperf_delta == 0)
471 return;
472
473 - s->time = now;
474 + s->time = ktime_get();
475 s->aperf = aperf;
476 s->mperf = mperf;
477 + s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
478 +}
479
480 - /* If the previous iteration was too long ago, discard it. */
481 - if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
482 - s->khz = 0;
483 - else
484 - s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
485 +static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
486 +{
487 + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
488 +
489 + /* Don't bother re-computing within the cache threshold time. */
490 + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
491 + return true;
492 +
493 + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
494 +
495 + /* Return false if the previous iteration was too long ago. */
496 + return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
497 }
498
499 -unsigned int arch_freq_get_on_cpu(int cpu)
500 +unsigned int aperfmperf_get_khz(int cpu)
501 {
502 - unsigned int khz;
503 + if (!cpu_khz)
504 + return 0;
505 +
506 + if (!static_cpu_has(X86_FEATURE_APERFMPERF))
507 + return 0;
508
509 + aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
510 + return per_cpu(samples.khz, cpu);
511 +}
512 +
513 +void arch_freq_prepare_all(void)
514 +{
515 + ktime_t now = ktime_get();
516 + bool wait = false;
517 + int cpu;
518 +
519 + if (!cpu_khz)
520 + return;
521 +
522 + if (!static_cpu_has(X86_FEATURE_APERFMPERF))
523 + return;
524 +
525 + for_each_online_cpu(cpu)
526 + if (!aperfmperf_snapshot_cpu(cpu, now, false))
527 + wait = true;
528 +
529 + if (wait)
530 + msleep(APERFMPERF_REFRESH_DELAY_MS);
531 +}
532 +
533 +unsigned int arch_freq_get_on_cpu(int cpu)
534 +{
535 if (!cpu_khz)
536 return 0;
537
538 if (!static_cpu_has(X86_FEATURE_APERFMPERF))
539 return 0;
540
541 - smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
542 - khz = per_cpu(samples.khz, cpu);
543 - if (khz)
544 - return khz;
545 + if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
546 + return per_cpu(samples.khz, cpu);
547
548 msleep(APERFMPERF_REFRESH_DELAY_MS);
549 smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
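Note: underneath all the caching rework above, the reported frequency is just the calibrated base clock scaled by the ratio of the APERF and MPERF deltas over the sampling window, as in the diff's div64_u64((cpu_khz * aperf_delta), mperf_delta). A self-contained arithmetic sketch with made-up counter values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t cpu_khz     = 2400000ULL;      /* calibrated base clock: 2.4 GHz */
        uint64_t aperf_delta = 3000000000ULL;   /* actual cycles in the window */
        uint64_t mperf_delta = 2000000000ULL;   /* reference cycles in the window */

        /* same arithmetic as div64_u64((cpu_khz * aperf_delta), mperf_delta) */
        printf("%" PRIu64 " kHz\n", cpu_khz * aperf_delta / mperf_delta);  /* 3600000 */
        return 0;
}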
550 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
551 index b1be494ab4e8..2d3bd2215e5b 100644
552 --- a/arch/x86/kernel/cpu/common.c
553 +++ b/arch/x86/kernel/cpu/common.c
554 @@ -900,7 +900,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
555 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
556
557 if (c->x86_vendor != X86_VENDOR_AMD)
558 - setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
559 + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
560
561 fpu__init_system(c);
562
563 diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
564 index f52a370b6c00..e806b11a99af 100644
565 --- a/arch/x86/kernel/cpu/cpu.h
566 +++ b/arch/x86/kernel/cpu/cpu.h
567 @@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
568
569 extern void get_cpu_cap(struct cpuinfo_x86 *c);
570 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
571 +
572 +unsigned int aperfmperf_get_khz(int cpu);
573 +
574 #endif /* ARCH_X86_CPU_H */
575 diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
576 index c6daec4bdba5..330b8462d426 100644
577 --- a/arch/x86/kernel/cpu/microcode/amd.c
578 +++ b/arch/x86/kernel/cpu/microcode/amd.c
579 @@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
580 #define F14H_MPB_MAX_SIZE 1824
581 #define F15H_MPB_MAX_SIZE 4096
582 #define F16H_MPB_MAX_SIZE 3458
583 +#define F17H_MPB_MAX_SIZE 3200
584
585 switch (family) {
586 case 0x14:
587 @@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
588 case 0x16:
589 max_size = F16H_MPB_MAX_SIZE;
590 break;
591 + case 0x17:
592 + max_size = F17H_MPB_MAX_SIZE;
593 + break;
594 default:
595 max_size = F1XH_MPB_MAX_SIZE;
596 break;
597 diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
598 index 6b7e17bf0b71..e7ecedafa1c8 100644
599 --- a/arch/x86/kernel/cpu/proc.c
600 +++ b/arch/x86/kernel/cpu/proc.c
601 @@ -5,6 +5,8 @@
602 #include <linux/seq_file.h>
603 #include <linux/cpufreq.h>
604
605 +#include "cpu.h"
606 +
607 /*
608 * Get CPU information for use by the procfs.
609 */
610 @@ -78,8 +80,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
611 seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
612
613 if (cpu_has(c, X86_FEATURE_TSC)) {
614 - unsigned int freq = cpufreq_quick_get(cpu);
615 + unsigned int freq = aperfmperf_get_khz(cpu);
616
617 + if (!freq)
618 + freq = cpufreq_quick_get(cpu);
619 if (!freq)
620 freq = cpu_khz;
621 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
622 diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
623 index f56902c1f04b..2a4849e92831 100644
624 --- a/arch/x86/mm/dump_pagetables.c
625 +++ b/arch/x86/mm/dump_pagetables.c
626 @@ -61,10 +61,10 @@ enum address_markers_idx {
627 KASAN_SHADOW_START_NR,
628 KASAN_SHADOW_END_NR,
629 #endif
630 + CPU_ENTRY_AREA_NR,
631 #if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
632 LDT_NR,
633 #endif
634 - CPU_ENTRY_AREA_NR,
635 #ifdef CONFIG_X86_ESPFIX64
636 ESPFIX_START_NR,
637 #endif
638 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
639 index 80259ad8c386..6b462a472a7b 100644
640 --- a/arch/x86/mm/init.c
641 +++ b/arch/x86/mm/init.c
642 @@ -870,7 +870,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
643 .next_asid = 1,
644 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
645 };
646 -EXPORT_SYMBOL_GPL(cpu_tlbstate);
647 +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
648
649 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
650 {
651 diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
652 index 879ef930e2c2..aedebd2ebf1e 100644
653 --- a/arch/x86/mm/kaslr.c
654 +++ b/arch/x86/mm/kaslr.c
655 @@ -34,25 +34,14 @@
656 #define TB_SHIFT 40
657
658 /*
659 - * Virtual address start and end range for randomization. The end changes base
660 - * on configuration to have the highest amount of space for randomization.
661 - * It increases the possible random position for each randomized region.
662 + * Virtual address start and end range for randomization.
663 *
664 - * You need to add an if/def entry if you introduce a new memory region
665 - * compatible with KASLR. Your entry must be in logical order with memory
666 - * layout. For example, ESPFIX is before EFI because its virtual address is
667 - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
668 - * ensure that this order is correct and won't be changed.
669 + * The end address could depend on more configuration options to make the
670 + * highest amount of space for randomization available, but that's too hard
671 + * to keep straight and caused issues already.
672 */
673 static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
674 -
675 -#if defined(CONFIG_X86_ESPFIX64)
676 -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
677 -#elif defined(CONFIG_EFI)
678 -static const unsigned long vaddr_end = EFI_VA_END;
679 -#else
680 -static const unsigned long vaddr_end = __START_KERNEL_map;
681 -#endif
682 +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;
683
684 /* Default values */
685 unsigned long page_offset_base = __PAGE_OFFSET_BASE;
686 @@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void)
687 unsigned long remain_entropy;
688
689 /*
690 - * All these BUILD_BUG_ON checks ensures the memory layout is
691 - * consistent with the vaddr_start/vaddr_end variables.
692 + * These BUILD_BUG_ON checks ensure the memory layout is consistent
693 + * with the vaddr_start/vaddr_end variables. These checks are very
694 + * limited....
695 */
696 BUILD_BUG_ON(vaddr_start >= vaddr_end);
697 - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
698 - vaddr_end >= EFI_VA_END);
699 - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
700 - IS_ENABLED(CONFIG_EFI)) &&
701 - vaddr_end >= __START_KERNEL_map);
702 + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
703 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
704
705 if (!kaslr_memory_enabled())
706 diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
707 index 2da28ba97508..43d4a4a29037 100644
708 --- a/arch/x86/mm/pti.c
709 +++ b/arch/x86/mm/pti.c
710 @@ -56,13 +56,13 @@
711
712 static void __init pti_print_if_insecure(const char *reason)
713 {
714 - if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
715 + if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
716 pr_info("%s\n", reason);
717 }
718
719 static void __init pti_print_if_secure(const char *reason)
720 {
721 - if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
722 + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
723 pr_info("%s\n", reason);
724 }
725
726 @@ -96,7 +96,7 @@ void __init pti_check_boottime_disable(void)
727 }
728
729 autosel:
730 - if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
731 + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
732 return;
733 enable:
734 setup_force_cpu_cap(X86_FEATURE_PTI);
735 diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
736 index 8a99a2e96537..5b513ccffde4 100644
737 --- a/arch/x86/platform/efi/quirks.c
738 +++ b/arch/x86/platform/efi/quirks.c
739 @@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
740 /*
741 * Update the first page pointer to skip over the CSH header.
742 */
743 - cap_info->pages[0] += csh->headersize;
744 + cap_info->phys[0] += csh->headersize;
745 +
746 + /*
747 + * cap_info->capsule should point at a virtual mapping of the entire
748 + * capsule, starting at the capsule header. Our image has the Quark
749 + * security header prepended, so we cannot rely on the default vmap()
750 + * mapping created by the generic capsule code.
751 + * Given that the Quark firmware does not appear to care about the
752 + * virtual mapping, let's just point cap_info->capsule at our copy
753 + * of the capsule header.
754 + */
755 + cap_info->capsule = &cap_info->header;
756
757 return 1;
758 }
759 diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
760 index db1bc3147bc4..600afa99941f 100644
761 --- a/crypto/chacha20poly1305.c
762 +++ b/crypto/chacha20poly1305.c
763 @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
764 algt->mask));
765 if (IS_ERR(poly))
766 return PTR_ERR(poly);
767 + poly_hash = __crypto_hash_alg_common(poly);
768 +
769 + err = -EINVAL;
770 + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
771 + goto out_put_poly;
772
773 err = -ENOMEM;
774 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
775 @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
776
777 ctx = aead_instance_ctx(inst);
778 ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
779 - poly_hash = __crypto_hash_alg_common(poly);
780 err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
781 aead_crypto_instance(inst));
782 if (err)
783 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
784 index ee9cfb99fe25..f8ec3d4ba4a8 100644
785 --- a/crypto/pcrypt.c
786 +++ b/crypto/pcrypt.c
787 @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
788 crypto_free_aead(ctx->child);
789 }
790
791 +static void pcrypt_free(struct aead_instance *inst)
792 +{
793 + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
794 +
795 + crypto_drop_aead(&ctx->spawn);
796 + kfree(inst);
797 +}
798 +
799 static int pcrypt_init_instance(struct crypto_instance *inst,
800 struct crypto_alg *alg)
801 {
802 @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
803 inst->alg.encrypt = pcrypt_aead_encrypt;
804 inst->alg.decrypt = pcrypt_aead_decrypt;
805
806 + inst->free = pcrypt_free;
807 +
808 err = aead_register_instance(tmpl, inst);
809 if (err)
810 goto out_drop_aead;
811 @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
812 return -EINVAL;
813 }
814
815 -static void pcrypt_free(struct crypto_instance *inst)
816 -{
817 - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
818 -
819 - crypto_drop_aead(&ctx->spawn);
820 - kfree(inst);
821 -}
822 -
823 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
824 unsigned long val, void *data)
825 {
826 @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
827 static struct crypto_template pcrypt_tmpl = {
828 .name = "pcrypt",
829 .create = pcrypt_create,
830 - .free = pcrypt_free,
831 .module = THIS_MODULE,
832 };
833
834 diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
835 index 328ca93781cf..1b76d9585902 100644
836 --- a/drivers/bus/sunxi-rsb.c
837 +++ b/drivers/bus/sunxi-rsb.c
838 @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
839 .match = sunxi_rsb_device_match,
840 .probe = sunxi_rsb_device_probe,
841 .remove = sunxi_rsb_device_remove,
842 + .uevent = of_device_uevent_modalias,
843 };
844
845 static void sunxi_rsb_dev_release(struct device *dev)
846 diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
847 index 3e104f5aa0c2..b56b3f711d94 100644
848 --- a/drivers/crypto/chelsio/Kconfig
849 +++ b/drivers/crypto/chelsio/Kconfig
850 @@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
851 select CRYPTO_SHA256
852 select CRYPTO_SHA512
853 select CRYPTO_AUTHENC
854 + select CRYPTO_GF128MUL
855 ---help---
856 The Chelsio Crypto Co-processor driver for T6 adapters.
857
858 diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
859 index a9fd8b9e86cd..699ee5a9a8f9 100644
860 --- a/drivers/crypto/n2_core.c
861 +++ b/drivers/crypto/n2_core.c
862 @@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
863 CWQ_ENTRY_SIZE, 0, NULL);
864 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
865 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
866 + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
867 return -ENOMEM;
868 }
869 return 0;
870 @@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
871 {
872 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
873 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
874 + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
875 + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
876 }
877
878 static long spu_queue_register_workfn(void *arg)
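Note: the n2_core fix is the classic "poison the pointer after freeing" rule: queue_cache_init() can run again after a failure, and destroying a stale cache pointer would be a double free, while kmem_cache_destroy() on NULL, much like free(NULL), is a harmless no-op. A userspace analogue of the idempotent-teardown pattern (hypothetical names):

#include <stdlib.h>

static char *queue_buf;

static void queue_destroy(void)
{
        free(queue_buf);
        queue_buf = NULL;       /* free(NULL) is a no-op, so teardown is now idempotent */
}

int main(void)
{
        queue_buf = malloc(64);
        queue_destroy();
        queue_destroy();        /* safe: the second call sees NULL */
        return 0;
}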
879 diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
880 index ec8ac5c4dd84..055e2e8f985a 100644
881 --- a/drivers/firmware/efi/capsule-loader.c
882 +++ b/drivers/firmware/efi/capsule-loader.c
883 @@ -20,10 +20,6 @@
884
885 #define NO_FURTHER_WRITE_ACTION -1
886
887 -#ifndef phys_to_page
888 -#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
889 -#endif
890 -
891 /**
892 * efi_free_all_buff_pages - free all previous allocated buffer pages
893 * @cap_info: pointer to current instance of capsule_info structure
894 @@ -35,7 +31,7 @@
895 static void efi_free_all_buff_pages(struct capsule_info *cap_info)
896 {
897 while (cap_info->index > 0)
898 - __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
899 + __free_page(cap_info->pages[--cap_info->index]);
900
901 cap_info->index = NO_FURTHER_WRITE_ACTION;
902 }
903 @@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
904
905 cap_info->pages = temp_page;
906
907 + temp_page = krealloc(cap_info->phys,
908 + pages_needed * sizeof(phys_addr_t *),
909 + GFP_KERNEL | __GFP_ZERO);
910 + if (!temp_page)
911 + return -ENOMEM;
912 +
913 + cap_info->phys = temp_page;
914 +
915 return 0;
916 }
917
918 @@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
919 **/
920 static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
921 {
922 + bool do_vunmap = false;
923 int ret;
924
925 - ret = efi_capsule_update(&cap_info->header, cap_info->pages);
926 + /*
927 + * cap_info->capsule may have been assigned already by a quirk
928 + * handler, so only overwrite it if it is NULL
929 + */
930 + if (!cap_info->capsule) {
931 + cap_info->capsule = vmap(cap_info->pages, cap_info->index,
932 + VM_MAP, PAGE_KERNEL);
933 + if (!cap_info->capsule)
934 + return -ENOMEM;
935 + do_vunmap = true;
936 + }
937 +
938 + ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
939 + if (do_vunmap)
940 + vunmap(cap_info->capsule);
941 if (ret) {
942 pr_err("capsule update failed\n");
943 return ret;
944 @@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
945 goto failed;
946 }
947
948 - cap_info->pages[cap_info->index++] = page_to_phys(page);
949 + cap_info->pages[cap_info->index] = page;
950 + cap_info->phys[cap_info->index] = page_to_phys(page);
951 cap_info->page_bytes_remain = PAGE_SIZE;
952 + cap_info->index++;
953 } else {
954 - page = phys_to_page(cap_info->pages[cap_info->index - 1]);
955 + page = cap_info->pages[cap_info->index - 1];
956 }
957
958 kbuff = kmap(page);
959 @@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
960 struct capsule_info *cap_info = file->private_data;
961
962 kfree(cap_info->pages);
963 + kfree(cap_info->phys);
964 kfree(file->private_data);
965 file->private_data = NULL;
966 return 0;
967 @@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
968 return -ENOMEM;
969 }
970
971 + cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
972 + if (!cap_info->phys) {
973 + kfree(cap_info->pages);
974 + kfree(cap_info);
975 + return -ENOMEM;
976 + }
977 +
978 file->private_data = cap_info;
979
980 return 0;
981 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
982 index c9bcc6c45012..ce2ed16f2a30 100644
983 --- a/drivers/gpu/drm/i915/i915_reg.h
984 +++ b/drivers/gpu/drm/i915/i915_reg.h
985 @@ -6944,6 +6944,7 @@ enum {
986 #define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
987
988 #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
989 +#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
990 #define MASK_WAKEMEM (1<<13)
991
992 #define SKL_DFSM _MMIO(0x51000)
993 @@ -8475,6 +8476,7 @@ enum skl_power_gate {
994 #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
995 #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
996 #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
997 +#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
998 #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
999 #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
1000 #define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
1001 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
1002 index 1241e5891b29..26a8dcd2c549 100644
1003 --- a/drivers/gpu/drm/i915/intel_cdclk.c
1004 +++ b/drivers/gpu/drm/i915/intel_cdclk.c
1005 @@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
1006
1007 static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
1008 {
1009 - int min_cdclk = skl_calc_cdclk(0, vco);
1010 u32 val;
1011
1012 WARN_ON(vco != 8100000 && vco != 8640000);
1013
1014 - /* select the minimum CDCLK before enabling DPLL 0 */
1015 - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
1016 - I915_WRITE(CDCLK_CTL, val);
1017 - POSTING_READ(CDCLK_CTL);
1018 -
1019 /*
1020 * We always enable DPLL0 with the lowest link rate possible, but still
1021 * taking into account the VCO required to operate the eDP panel at the
1022 @@ -922,7 +916,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
1023 {
1024 int cdclk = cdclk_state->cdclk;
1025 int vco = cdclk_state->vco;
1026 - u32 freq_select, pcu_ack;
1027 + u32 freq_select, pcu_ack, cdclk_ctl;
1028 int ret;
1029
1030 WARN_ON((cdclk == 24000) != (vco == 0));
1031 @@ -939,7 +933,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
1032 return;
1033 }
1034
1035 - /* set CDCLK_CTL */
1036 + /* Choose frequency for this cdclk */
1037 switch (cdclk) {
1038 case 450000:
1039 case 432000:
1040 @@ -967,10 +961,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
1041 dev_priv->cdclk.hw.vco != vco)
1042 skl_dpll0_disable(dev_priv);
1043
1044 + cdclk_ctl = I915_READ(CDCLK_CTL);
1045 +
1046 + if (dev_priv->cdclk.hw.vco != vco) {
1047 + /* Wa Display #1183: skl,kbl,cfl */
1048 + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
1049 + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
1050 + I915_WRITE(CDCLK_CTL, cdclk_ctl);
1051 + }
1052 +
1053 + /* Wa Display #1183: skl,kbl,cfl */
1054 + cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
1055 + I915_WRITE(CDCLK_CTL, cdclk_ctl);
1056 + POSTING_READ(CDCLK_CTL);
1057 +
1058 if (dev_priv->cdclk.hw.vco != vco)
1059 skl_dpll0_enable(dev_priv, vco);
1060
1061 - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
1062 + /* Wa Display #1183: skl,kbl,cfl */
1063 + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
1064 + I915_WRITE(CDCLK_CTL, cdclk_ctl);
1065 +
1066 + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
1067 + I915_WRITE(CDCLK_CTL, cdclk_ctl);
1068 +
1069 + /* Wa Display #1183: skl,kbl,cfl */
1070 + cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
1071 + I915_WRITE(CDCLK_CTL, cdclk_ctl);
1072 POSTING_READ(CDCLK_CTL);
1073
1074 /* inform PCU of the change */
1075 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
1076 index 49577eba8e7e..51cb5293bf43 100644
1077 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
1078 +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
1079 @@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1080
1081 DRM_DEBUG_KMS("Enabling DC5\n");
1082
1083 + /* Wa Display #1183: skl,kbl,cfl */
1084 + if (IS_GEN9_BC(dev_priv))
1085 + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1086 + SKL_SELECT_ALTERNATE_DC_EXIT);
1087 +
1088 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1089 }
1090
1091 @@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
1092 {
1093 DRM_DEBUG_KMS("Disabling DC6\n");
1094
1095 + /* Wa Display #1183: skl,kbl,cfl */
1096 + if (IS_GEN9_BC(dev_priv))
1097 + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1098 + SKL_SELECT_ALTERNATE_DC_EXIT);
1099 +
1100 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1101 }
1102
1103 @@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1104 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
1105 BIT_ULL(POWER_DOMAIN_MODESET) | \
1106 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1107 + BIT_ULL(POWER_DOMAIN_GMBUS) | \
1108 BIT_ULL(POWER_DOMAIN_INIT))
1109
1110 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1111 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1112 index b84cd978fce2..a4aaa748e987 100644
1113 --- a/drivers/input/mouse/elantech.c
1114 +++ b/drivers/input/mouse/elantech.c
1115 @@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1116 case 5:
1117 etd->hw_version = 3;
1118 break;
1119 - case 6 ... 14:
1120 + case 6 ... 15:
1121 etd->hw_version = 4;
1122 break;
1123 default:
1124 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1125 index e67ba6c40faf..8f7a3c00b6cf 100644
1126 --- a/drivers/iommu/arm-smmu-v3.c
1127 +++ b/drivers/iommu/arm-smmu-v3.c
1128 @@ -1611,13 +1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
1129 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
1130 domain->geometry.aperture_end = (1UL << ias) - 1;
1131 domain->geometry.force_aperture = true;
1132 - smmu_domain->pgtbl_ops = pgtbl_ops;
1133
1134 ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
1135 - if (ret < 0)
1136 + if (ret < 0) {
1137 free_io_pgtable_ops(pgtbl_ops);
1138 + return ret;
1139 + }
1140
1141 - return ret;
1142 + smmu_domain->pgtbl_ops = pgtbl_ops;
1143 + return 0;
1144 }
1145
1146 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1147 @@ -1644,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
1148
1149 static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1150 {
1151 - int i;
1152 + int i, j;
1153 struct arm_smmu_master_data *master = fwspec->iommu_priv;
1154 struct arm_smmu_device *smmu = master->smmu;
1155
1156 @@ -1652,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
1157 u32 sid = fwspec->ids[i];
1158 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
1159
1160 + /* Bridged PCI devices may end up with duplicated IDs */
1161 + for (j = 0; j < i; j++)
1162 + if (fwspec->ids[j] == sid)
1163 + break;
1164 + if (j < i)
1165 + continue;
1166 +
1167 arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
1168 }
1169 }
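Note: the duplicate-ID guard above is a quadratic scan, which is fine for the handful of stream IDs a bridged PCI device carries: before acting on ids[i], check whether the same value already appeared at an earlier index. The same loop shape in isolation:

#include <stdio.h>

int main(void)
{
        int ids[] = { 5, 7, 5, 9 };     /* duplicated ID 5, as behind a PCI bridge */
        int n = 4;

        for (int i = 0; i < n; i++) {
                int j;

                for (j = 0; j < i; j++)
                        if (ids[j] == ids[i])
                                break;
                if (j < i)
                        continue;       /* already handled this ID */
                printf("install entry for id %d\n", ids[i]);
        }
        return 0;
}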
1170 diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
1171 index 85cff68643e0..125b744c9c28 100644
1172 --- a/drivers/mtd/nand/pxa3xx_nand.c
1173 +++ b/drivers/mtd/nand/pxa3xx_nand.c
1174 @@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
1175
1176 switch (command) {
1177 case NAND_CMD_READ0:
1178 + case NAND_CMD_READOOB:
1179 case NAND_CMD_PAGEPROG:
1180 info->use_ecc = 1;
1181 break;
1182 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
1183 index 19e4ad2f3f2e..0c4b690cf761 100644
1184 --- a/fs/btrfs/delayed-inode.c
1185 +++ b/fs/btrfs/delayed-inode.c
1186 @@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
1187
1188 spin_lock(&root->inode_lock);
1189 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
1190 +
1191 if (node) {
1192 if (btrfs_inode->delayed_node) {
1193 refcount_inc(&node->refs); /* can be accessed */
1194 @@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
1195 spin_unlock(&root->inode_lock);
1196 return node;
1197 }
1198 - btrfs_inode->delayed_node = node;
1199 - /* can be accessed and cached in the inode */
1200 - refcount_add(2, &node->refs);
1201 +
1202 + /*
1203 + * It's possible that we're racing into the middle of removing
1204 + * this node from the radix tree. In this case, the refcount
1205 + * was zero and it should never go back to one. Just return
1206 + * NULL like it was never in the radix at all; our release
1207 + * function is in the process of removing it.
1208 + *
1209 + * Some implementations of refcount_inc refuse to bump the
1210 + * refcount once it has hit zero. If we don't do this dance
1211 + * here, refcount_inc() may decide to just WARN_ONCE() instead
1212 + * of actually bumping the refcount.
1213 + *
1214 + * If this node is properly in the radix, we want to bump the
1215 + * refcount twice, once for the inode and once for this get
1216 + * operation.
1217 + */
1218 + if (refcount_inc_not_zero(&node->refs)) {
1219 + refcount_inc(&node->refs);
1220 + btrfs_inode->delayed_node = node;
1221 + } else {
1222 + node = NULL;
1223 + }
1224 +
1225 spin_unlock(&root->inode_lock);
1226 return node;
1227 }
1228 @@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
1229 mutex_unlock(&delayed_node->mutex);
1230
1231 if (refcount_dec_and_test(&delayed_node->refs)) {
1232 - bool free = false;
1233 struct btrfs_root *root = delayed_node->root;
1234 +
1235 spin_lock(&root->inode_lock);
1236 - if (refcount_read(&delayed_node->refs) == 0) {
1237 - radix_tree_delete(&root->delayed_nodes_tree,
1238 - delayed_node->inode_id);
1239 - free = true;
1240 - }
1241 + /*
1242 + * Once our refcount goes to zero, nobody is allowed to bump it
1243 + * back up. We can delete it now.
1244 + */
1245 + ASSERT(refcount_read(&delayed_node->refs) == 0);
1246 + radix_tree_delete(&root->delayed_nodes_tree,
1247 + delayed_node->inode_id);
1248 spin_unlock(&root->inode_lock);
1249 - if (free)
1250 - kmem_cache_free(delayed_node_cache, delayed_node);
1251 + kmem_cache_free(delayed_node_cache, delayed_node);
1252 }
1253 }
1254
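Note: the btrfs fix leans on the semantics spelled out in the new comment: a refcount that has reached zero must never be bumped back up, so a lookup takes its reference with an increment that fails on zero. A userspace sketch of that primitive using C11 atomics (the kernel's refcount_inc_not_zero() additionally saturates and warns, which is omitted here):

#include <stdatomic.h>
#include <stdbool.h>

static bool ref_inc_not_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0)        /* refuse to resurrect a dying object */
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return true;
        return false;           /* caller must treat the lookup as a miss */
}

int main(void)
{
        atomic_int refs = 1;

        return ref_inc_not_zero(&refs) ? 0 : 1;         /* takes the ref, exits 0 */
}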
1255 diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
1256 index e0f867cd8553..96f1087e372c 100644
1257 --- a/fs/proc/cpuinfo.c
1258 +++ b/fs/proc/cpuinfo.c
1259 @@ -1,12 +1,18 @@
1260 // SPDX-License-Identifier: GPL-2.0
1261 +#include <linux/cpufreq.h>
1262 #include <linux/fs.h>
1263 #include <linux/init.h>
1264 #include <linux/proc_fs.h>
1265 #include <linux/seq_file.h>
1266
1267 +__weak void arch_freq_prepare_all(void)
1268 +{
1269 +}
1270 +
1271 extern const struct seq_operations cpuinfo_op;
1272 static int cpuinfo_open(struct inode *inode, struct file *file)
1273 {
1274 + arch_freq_prepare_all();
1275 return seq_open(file, &cpuinfo_op);
1276 }
1277
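Note: the empty arch_freq_prepare_all() here is a weak default: any architecture that links a strong symbol of the same name (x86, in this series) overrides it at link time. A standalone illustration of the mechanism, using the attribute that the kernel's __weak expands to:

#include <stdio.h>

/* Default, replaced by any strong definition elsewhere in the link. */
__attribute__((weak)) void arch_freq_prepare_all(void)
{
        puts("weak default: nothing to do");
}

int main(void)
{
        arch_freq_prepare_all();
        return 0;
}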
1278 diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
1279 index 1c713fd5b3e6..5aa392eae1c3 100644
1280 --- a/fs/userfaultfd.c
1281 +++ b/fs/userfaultfd.c
1282 @@ -570,11 +570,14 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
1283 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
1284 struct userfaultfd_wait_queue *ewq)
1285 {
1286 + struct userfaultfd_ctx *release_new_ctx;
1287 +
1288 if (WARN_ON_ONCE(current->flags & PF_EXITING))
1289 goto out;
1290
1291 ewq->ctx = ctx;
1292 init_waitqueue_entry(&ewq->wq, current);
1293 + release_new_ctx = NULL;
1294
1295 spin_lock(&ctx->event_wqh.lock);
1296 /*
1297 @@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
1298 new = (struct userfaultfd_ctx *)
1299 (unsigned long)
1300 ewq->msg.arg.reserved.reserved1;
1301 -
1302 - userfaultfd_ctx_put(new);
1303 + release_new_ctx = new;
1304 }
1305 break;
1306 }
1307 @@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
1308 __set_current_state(TASK_RUNNING);
1309 spin_unlock(&ctx->event_wqh.lock);
1310
1311 + if (release_new_ctx) {
1312 + struct vm_area_struct *vma;
1313 + struct mm_struct *mm = release_new_ctx->mm;
1314 +
1315 + /* the various vma->vm_userfaultfd_ctx still points to it */
1316 + down_write(&mm->mmap_sem);
1317 + for (vma = mm->mmap; vma; vma = vma->vm_next)
1318 + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
1319 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1320 + up_write(&mm->mmap_sem);
1321 +
1322 + userfaultfd_ctx_put(release_new_ctx);
1323 + }
1324 +
1325 /*
1326 * ctx may go away after this if the userfault pseudo fd is
1327 * already released.
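Note: the shape of the userfaultfd fix: releasing the new context needs mmap_sem (to clear the vma->vm_userfaultfd_ctx pointers), which cannot be taken while holding the event waitqueue spinlock, so the code records the victim under the lock and does the heavy teardown after dropping it. A generic record-then-release sketch with pthreads (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct ctx { int refs; };

static pthread_mutex_t wqh_lock = PTHREAD_MUTEX_INITIALIZER;

static void ctx_put(struct ctx *c)
{
        if (--c->refs == 0)     /* may sleep or take other locks in real code */
                free(c);
}

static void event_completed(struct ctx *new_ctx, int fork_failed)
{
        struct ctx *release_new_ctx = NULL;

        pthread_mutex_lock(&wqh_lock);
        if (fork_failed)
                release_new_ctx = new_ctx;      /* only record it under the lock */
        pthread_mutex_unlock(&wqh_lock);

        if (release_new_ctx)
                ctx_put(release_new_ctx);       /* heavy teardown runs lock-free */
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        c->refs = 1;
        event_completed(c, 1);
        return 0;
}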
1328 diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
1329 index 537ff842ff73..cbf85c4c745f 100644
1330 --- a/include/linux/cpufreq.h
1331 +++ b/include/linux/cpufreq.h
1332 @@ -917,6 +917,7 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
1333 }
1334 #endif
1335
1336 +extern void arch_freq_prepare_all(void);
1337 extern unsigned int arch_freq_get_on_cpu(int cpu);
1338
1339 /* the following are really really optional */
1340 diff --git a/include/linux/efi.h b/include/linux/efi.h
1341 index d813f7b04da7..29fdf8029cf6 100644
1342 --- a/include/linux/efi.h
1343 +++ b/include/linux/efi.h
1344 @@ -140,11 +140,13 @@ struct efi_boot_memmap {
1345
1346 struct capsule_info {
1347 efi_capsule_header_t header;
1348 + efi_capsule_header_t *capsule;
1349 int reset_type;
1350 long index;
1351 size_t count;
1352 size_t total_size;
1353 - phys_addr_t *pages;
1354 + struct page **pages;
1355 + phys_addr_t *phys;
1356 size_t page_bytes_remain;
1357 };
1358
1359 diff --git a/include/linux/fscache.h b/include/linux/fscache.h
1360 index f4ff47d4a893..fe0c349684fa 100644
1361 --- a/include/linux/fscache.h
1362 +++ b/include/linux/fscache.h
1363 @@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
1364 {
1365 if (fscache_cookie_valid(cookie) && PageFsCache(page))
1366 return __fscache_maybe_release_page(cookie, page, gfp);
1367 - return false;
1368 + return true;
1369 }
1370
1371 /**
1372 diff --git a/kernel/acct.c b/kernel/acct.c
1373 index 6670fbd3e466..354578d253d5 100644
1374 --- a/kernel/acct.c
1375 +++ b/kernel/acct.c
1376 @@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
1377 {
1378 struct kstatfs sbuf;
1379
1380 - if (time_is_before_jiffies(acct->needcheck))
1381 + if (time_is_after_jiffies(acct->needcheck))
1382 goto out;
1383
1384 /* May block */
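Note: the acct.c one-liner inverts a wrap-safe time comparison; the bug skipped the free-space check exactly when it was due. For free-running counters like jiffies, "a before b" is computed from the sign of the difference. A standalone sketch of the idiom (the kernel's time_before()/time_is_after_jiffies() add type checking on top):

#include <stdio.h>

#define time_before(a, b)       ((long)((a) - (b)) < 0)         /* wrap-safe */

int main(void)
{
        unsigned long jiffies = 1000;
        unsigned long needcheck = 1500;         /* next check due at tick 1500 */

        if (time_before(jiffies, needcheck))
                puts("needcheck still in the future: skip the check");
        else
                puts("deadline passed: do the free-space check");
        return 0;
}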
1385 diff --git a/kernel/signal.c b/kernel/signal.c
1386 index 8dcd8825b2de..1facff1dbbae 100644
1387 --- a/kernel/signal.c
1388 +++ b/kernel/signal.c
1389 @@ -78,7 +78,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
1390 handler = sig_handler(t, sig);
1391
1392 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
1393 - handler == SIG_DFL && !force)
1394 + handler == SIG_DFL && !(force && sig_kernel_only(sig)))
1395 return 1;
1396
1397 return sig_handler_ignored(handler, sig);
1398 @@ -94,13 +94,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
1399 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
1400 return 0;
1401
1402 - if (!sig_task_ignored(t, sig, force))
1403 - return 0;
1404 -
1405 /*
1406 - * Tracers may want to know about even ignored signals.
1407 + * Tracers may want to know about even ignored signal unless it
1408 + * is SIGKILL which can't be reported anyway but can be ignored
1409 + * by SIGNAL_UNKILLABLE task.
1410 */
1411 - return !t->ptrace;
1412 + if (t->ptrace && sig != SIGKILL)
1413 + return 0;
1414 +
1415 + return sig_task_ignored(t, sig, force);
1416 }
1417
1418 /*
1419 @@ -929,9 +931,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
1420 * then start taking the whole group down immediately.
1421 */
1422 if (sig_fatal(p, sig) &&
1423 - !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
1424 + !(signal->flags & SIGNAL_GROUP_EXIT) &&
1425 !sigismember(&t->real_blocked, sig) &&
1426 - (sig == SIGKILL || !t->ptrace)) {
1427 + (sig == SIGKILL || !p->ptrace)) {
1428 /*
1429 * This signal will be fatal to the whole group.
1430 */
1431 diff --git a/mm/mprotect.c b/mm/mprotect.c
1432 index ec39f730a0bf..58b629bb70de 100644
1433 --- a/mm/mprotect.c
1434 +++ b/mm/mprotect.c
1435 @@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1436 next = pmd_addr_end(addr, end);
1437 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
1438 && pmd_none_or_clear_bad(pmd))
1439 - continue;
1440 + goto next;
1441
1442 /* invoke the mmu notifier if the pmd is populated */
1443 if (!mni_start) {
1444 @@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1445 }
1446
1447 /* huge pmd was handled */
1448 - continue;
1449 + goto next;
1450 }
1451 }
1452 /* fall through, the trans huge pmd just split */
1453 @@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1454 this_pages = change_pte_range(vma, pmd, addr, next, newprot,
1455 dirty_accountable, prot_numa);
1456 pages += this_pages;
1457 +next:
1458 + cond_resched();
1459 } while (pmd++, addr = next, addr != end);
1460
1461 if (mni_start)
1462 diff --git a/mm/sparse.c b/mm/sparse.c
1463 index 60805abf98af..30e56a100ee8 100644
1464 --- a/mm/sparse.c
1465 +++ b/mm/sparse.c
1466 @@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
1467 if (unlikely(!mem_section)) {
1468 unsigned long size, align;
1469
1470 - size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
1471 + size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
1472 align = 1 << (INTERNODE_CACHE_SHIFT);
1473 mem_section = memblock_virt_alloc(size, align);
1474 }
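Note: the mm/sparse.c change is a one-character sizeof bug: with SPARSEMEM_EXTREME, mem_section is an array of pointers to per-root section tables, so each slot costs the size of a pointer, not of the struct, and the old code over-allocated by that ratio. A toy reproduction (the struct is a stand-in, not the real layout):

#include <stdio.h>

struct mem_section { unsigned long section_mem_map; unsigned long pad[3]; };

int main(void)
{
        size_t roots = 2048;    /* stand-in for NR_SECTION_ROOTS */

        printf("wrong: %zu bytes, right: %zu bytes\n",
               roots * sizeof(struct mem_section),      /* 65536 on LP64 */
               roots * sizeof(struct mem_section *));   /* 16384 on LP64 */
        return 0;
}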
1475 diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
1476 index 82a64b58041d..e395137ecff1 100644
1477 --- a/security/apparmor/mount.c
1478 +++ b/security/apparmor/mount.c
1479 @@ -330,6 +330,9 @@ static int match_mnt_path_str(struct aa_profile *profile,
1480 AA_BUG(!mntpath);
1481 AA_BUG(!buffer);
1482
1483 + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
1484 + return 0;
1485 +
1486 error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
1487 &mntpnt, &info, profile->disconnected);
1488 if (error)
1489 @@ -381,6 +384,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path,
1490 AA_BUG(!profile);
1491 AA_BUG(devpath && !devbuffer);
1492
1493 + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
1494 + return 0;
1495 +
1496 if (devpath) {
1497 error = aa_path_name(devpath, path_flags(profile, devpath),
1498 devbuffer, &devname, &info,
1499 @@ -559,6 +565,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path,
1500 AA_BUG(!profile);
1501 AA_BUG(!path);
1502
1503 + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
1504 + return 0;
1505 +
1506 error = aa_path_name(path, path_flags(profile, path), buffer, &name,
1507 &info, profile->disconnected);
1508 if (error)
1509 @@ -614,7 +623,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile,
1510 AA_BUG(!new_path);
1511 AA_BUG(!old_path);
1512
1513 - if (profile_unconfined(profile))
1514 + if (profile_unconfined(profile) ||
1515 + !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT))
1516 return aa_get_newest_label(&profile->label);
1517
1518 error = aa_path_name(old_path, path_flags(profile, old_path),