Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0232-4.9.133-all-fixes.patch

Parent Directory | Revision Log


Revision 3234 - (hide annotations) (download)
Thu Oct 18 08:37:23 2018 UTC (5 years, 8 months ago) by niro
File size: 49540 byte(s)
-linux-4.9.133
1 niro 3234 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2     index a36a695318c6..f9f67be8d3c3 100644
3     --- a/Documentation/kernel-parameters.txt
4     +++ b/Documentation/kernel-parameters.txt
5     @@ -1084,12 +1084,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6     nopku [X86] Disable Memory Protection Keys CPU feature found
7     in some Intel CPUs.
8    
9     - eagerfpu= [X86]
10     - on enable eager fpu restore
11     - off disable eager fpu restore
12     - auto selects the default scheme, which automatically
13     - enables eagerfpu restore for xsaveopt.
14     -
15     module.async_probe [KNL]
16     Enable asynchronous probe on this module.
17    
18     diff --git a/Makefile b/Makefile
19     index a46c9788ca67..18090f899a7c 100644
20     --- a/Makefile
21     +++ b/Makefile
22     @@ -1,6 +1,6 @@
23     VERSION = 4
24     PATCHLEVEL = 9
25     -SUBLEVEL = 132
26     +SUBLEVEL = 133
27     EXTRAVERSION =
28     NAME = Roaring Lionus
29    
30     diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
31     index 0e8c0151a390..3ce12137f94f 100644
32     --- a/arch/arc/kernel/process.c
33     +++ b/arch/arc/kernel/process.c
34     @@ -213,6 +213,26 @@ int copy_thread(unsigned long clone_flags,
35     task_thread_info(current)->thr_ptr;
36     }
37    
38     +
39     + /*
40     + * setup usermode thread pointer #1:
41     + * when child is picked by scheduler, __switch_to() uses @c_callee to
42     + * populate usermode callee regs: this works (despite being in a kernel
43     + * function) since special return path for child @ret_from_fork()
44     + * ensures those regs are not clobbered all the way to RTIE to usermode
45     + */
46     + c_callee->r25 = task_thread_info(p)->thr_ptr;
47     +
48     +#ifdef CONFIG_ARC_CURR_IN_REG
49     + /*
50     + * setup usermode thread pointer #2:
51     + * however for this special use of r25 in kernel, __switch_to() sets
52     + * r25 for kernel needs and only in the final return path is usermode
53     + * r25 setup, from pt_regs->user_r25. So set that up as well
54     + */
55     + c_regs->user_r25 = c_callee->r25;
56     +#endif
57     +
58     return 0;
59     }
60    
61     diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
62     index e3acf5c3480e..02925043575a 100644
63     --- a/arch/powerpc/kernel/fadump.c
64     +++ b/arch/powerpc/kernel/fadump.c
65     @@ -365,9 +365,9 @@ static int __init early_fadump_reserve_mem(char *p)
66     }
67     early_param("fadump_reserve_mem", early_fadump_reserve_mem);
68    
69     -static void register_fw_dump(struct fadump_mem_struct *fdm)
70     +static int register_fw_dump(struct fadump_mem_struct *fdm)
71     {
72     - int rc;
73     + int rc, err;
74     unsigned int wait_time;
75    
76     pr_debug("Registering for firmware-assisted kernel dump...\n");
77     @@ -384,7 +384,11 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
78    
79     } while (wait_time);
80    
81     + err = -EIO;
82     switch (rc) {
83     + default:
84     + pr_err("Failed to register. Unknown Error(%d).\n", rc);
85     + break;
86     case -1:
87     printk(KERN_ERR "Failed to register firmware-assisted kernel"
88     " dump. Hardware Error(%d).\n", rc);
89     @@ -392,18 +396,22 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
90     case -3:
91     printk(KERN_ERR "Failed to register firmware-assisted kernel"
92     " dump. Parameter Error(%d).\n", rc);
93     + err = -EINVAL;
94     break;
95     case -9:
96     printk(KERN_ERR "firmware-assisted kernel dump is already "
97     " registered.");
98     fw_dump.dump_registered = 1;
99     + err = -EEXIST;
100     break;
101     case 0:
102     printk(KERN_INFO "firmware-assisted kernel dump registration"
103     " is successful\n");
104     fw_dump.dump_registered = 1;
105     + err = 0;
106     break;
107     }
108     + return err;
109     }
110    
111     void crash_fadump(struct pt_regs *regs, const char *str)
112     @@ -1006,7 +1014,7 @@ static unsigned long init_fadump_header(unsigned long addr)
113     return addr;
114     }
115    
116     -static void register_fadump(void)
117     +static int register_fadump(void)
118     {
119     unsigned long addr;
120     void *vaddr;
121     @@ -1017,7 +1025,7 @@ static void register_fadump(void)
122     * assisted dump.
123     */
124     if (!fw_dump.reserve_dump_area_size)
125     - return;
126     + return -ENODEV;
127    
128     ret = fadump_setup_crash_memory_ranges();
129     if (ret)
130     @@ -1032,7 +1040,7 @@ static void register_fadump(void)
131     fadump_create_elfcore_headers(vaddr);
132    
133     /* register the future kernel dump with firmware. */
134     - register_fw_dump(&fdm);
135     + return register_fw_dump(&fdm);
136     }
137    
138     static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
139     @@ -1218,7 +1226,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
140     switch (buf[0]) {
141     case '0':
142     if (fw_dump.dump_registered == 0) {
143     - ret = -EINVAL;
144     goto unlock_out;
145     }
146     /* Un-register Firmware-assisted dump */
147     @@ -1226,11 +1233,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
148     break;
149     case '1':
150     if (fw_dump.dump_registered == 1) {
151     - ret = -EINVAL;
152     + ret = -EEXIST;
153     goto unlock_out;
154     }
155     /* Register Firmware-assisted dump */
156     - register_fadump();
157     + ret = register_fadump();
158     break;
159     default:
160     ret = -EINVAL;
161     diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
162     index dd1958436591..5773e1161072 100644
163     --- a/arch/x86/crypto/crc32c-intel_glue.c
164     +++ b/arch/x86/crypto/crc32c-intel_glue.c
165     @@ -48,21 +48,13 @@
166     #ifdef CONFIG_X86_64
167     /*
168     * use carryless multiply version of crc32c when buffer
169     - * size is >= 512 (when eager fpu is enabled) or
170     - * >= 1024 (when eager fpu is disabled) to account
171     + * size is >= 512 to account
172     * for fpu state save/restore overhead.
173     */
174     -#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
175     -#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
176     +#define CRC32C_PCL_BREAKEVEN 512
177    
178     asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
179     unsigned int crc_init);
180     -static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
181     -#define set_pcl_breakeven_point() \
182     -do { \
183     - if (!use_eager_fpu()) \
184     - crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
185     -} while (0)
186     #endif /* CONFIG_X86_64 */
187    
188     static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
189     @@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
190     * use faster PCL version if datasize is large enough to
191     * overcome kernel fpu state save/restore overhead
192     */
193     - if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
194     + if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
195     kernel_fpu_begin();
196     *crcp = crc_pcl(data, len, *crcp);
197     kernel_fpu_end();
198     @@ -197,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
199     static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
200     u8 *out)
201     {
202     - if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
203     + if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
204     kernel_fpu_begin();
205     *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
206     kernel_fpu_end();
207     @@ -257,7 +249,6 @@ static int __init crc32c_intel_mod_init(void)
208     alg.update = crc32c_pcl_intel_update;
209     alg.finup = crc32c_pcl_intel_finup;
210     alg.digest = crc32c_pcl_intel_digest;
211     - set_pcl_breakeven_point();
212     }
213     #endif
214     return crypto_register_shash(&alg);
215     diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
216     index 02223cb4bcfd..1e967099ae51 100644
217     --- a/arch/x86/entry/vdso/vclock_gettime.c
218     +++ b/arch/x86/entry/vdso/vclock_gettime.c
219     @@ -37,8 +37,9 @@ extern u8 pvclock_page
220     notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
221     {
222     long ret;
223     - asm("syscall" : "=a" (ret) :
224     - "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
225     + asm ("syscall" : "=a" (ret), "=m" (*ts) :
226     + "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
227     + "memory", "rcx", "r11");
228     return ret;
229     }
230    
231     @@ -46,8 +47,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
232     {
233     long ret;
234    
235     - asm("syscall" : "=a" (ret) :
236     - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
237     + asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
238     + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
239     + "memory", "rcx", "r11");
240     return ret;
241     }
242    
243     @@ -58,13 +60,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
244     {
245     long ret;
246    
247     - asm(
248     + asm (
249     "mov %%ebx, %%edx \n"
250     - "mov %2, %%ebx \n"
251     + "mov %[clock], %%ebx \n"
252     "call __kernel_vsyscall \n"
253     "mov %%edx, %%ebx \n"
254     - : "=a" (ret)
255     - : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
256     + : "=a" (ret), "=m" (*ts)
257     + : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
258     : "memory", "edx");
259     return ret;
260     }
261     @@ -73,13 +75,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
262     {
263     long ret;
264    
265     - asm(
266     + asm (
267     "mov %%ebx, %%edx \n"
268     - "mov %2, %%ebx \n"
269     + "mov %[tv], %%ebx \n"
270     "call __kernel_vsyscall \n"
271     "mov %%edx, %%ebx \n"
272     - : "=a" (ret)
273     - : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
274     + : "=a" (ret), "=m" (*tv), "=m" (*tz)
275     + : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
276     : "memory", "edx");
277     return ret;
278     }
279     diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
280     index fbc1474960e3..f6d1bc93589c 100644
281     --- a/arch/x86/include/asm/cpufeatures.h
282     +++ b/arch/x86/include/asm/cpufeatures.h
283     @@ -104,7 +104,6 @@
284     #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
285     #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
286     #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
287     -/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
288     #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
289    
290     /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
291     diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
292     index 8554f960e21b..25152843dd1f 100644
293     --- a/arch/x86/include/asm/fixmap.h
294     +++ b/arch/x86/include/asm/fixmap.h
295     @@ -14,6 +14,16 @@
296     #ifndef _ASM_X86_FIXMAP_H
297     #define _ASM_X86_FIXMAP_H
298    
299     +/*
300     + * Exposed to assembly code for setting up initial page tables. Cannot be
301     + * calculated in assembly code (fixmap entries are an enum), but is sanity
302     + * checked in the actual fixmap C code to make sure that the fixmap is
303     + * covered fully.
304     + */
305     +#define FIXMAP_PMD_NUM 2
306     +/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
307     +#define FIXMAP_PMD_TOP 507
308     +
309     #ifndef __ASSEMBLY__
310     #include <linux/kernel.h>
311     #include <asm/acpi.h>
312     diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
313     index 8852e3afa1ad..499d6ed0e376 100644
314     --- a/arch/x86/include/asm/fpu/internal.h
315     +++ b/arch/x86/include/asm/fpu/internal.h
316     @@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
317     /*
318     * FPU related CPU feature flag helper routines:
319     */
320     -static __always_inline __pure bool use_eager_fpu(void)
321     -{
322     - return true;
323     -}
324     -
325     static __always_inline __pure bool use_xsaveopt(void)
326     {
327     return static_cpu_has(X86_FEATURE_XSAVEOPT);
328     @@ -501,24 +496,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
329     }
330    
331    
332     -/*
333     - * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
334     - * idiom, which is then paired with the sw-flag (fpregs_active) later on:
335     - */
336     -
337     -static inline void __fpregs_activate_hw(void)
338     -{
339     - if (!use_eager_fpu())
340     - clts();
341     -}
342     -
343     -static inline void __fpregs_deactivate_hw(void)
344     -{
345     - if (!use_eager_fpu())
346     - stts();
347     -}
348     -
349     -/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
350     static inline void __fpregs_deactivate(struct fpu *fpu)
351     {
352     WARN_ON_FPU(!fpu->fpregs_active);
353     @@ -528,7 +505,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
354     trace_x86_fpu_regs_deactivated(fpu);
355     }
356    
357     -/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
358     static inline void __fpregs_activate(struct fpu *fpu)
359     {
360     WARN_ON_FPU(fpu->fpregs_active);
361     @@ -554,22 +530,17 @@ static inline int fpregs_active(void)
362     }
363    
364     /*
365     - * Encapsulate the CR0.TS handling together with the
366     - * software flag.
367     - *
368     * These generally need preemption protection to work,
369     * do try to avoid using these on their own.
370     */
371     static inline void fpregs_activate(struct fpu *fpu)
372     {
373     - __fpregs_activate_hw();
374     __fpregs_activate(fpu);
375     }
376    
377     static inline void fpregs_deactivate(struct fpu *fpu)
378     {
379     __fpregs_deactivate(fpu);
380     - __fpregs_deactivate_hw();
381     }
382    
383     /*
384     @@ -596,8 +567,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
385     * or if the past 5 consecutive context-switches used math.
386     */
387     fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
388     - new_fpu->fpstate_active &&
389     - (use_eager_fpu() || new_fpu->counter > 5);
390     + new_fpu->fpstate_active;
391    
392     if (old_fpu->fpregs_active) {
393     if (!copy_fpregs_to_fpstate(old_fpu))
394     @@ -611,18 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
395    
396     /* Don't change CR0.TS if we just switch! */
397     if (fpu.preload) {
398     - new_fpu->counter++;
399     __fpregs_activate(new_fpu);
400     trace_x86_fpu_regs_activated(new_fpu);
401     prefetch(&new_fpu->state);
402     - } else {
403     - __fpregs_deactivate_hw();
404     }
405     } else {
406     - old_fpu->counter = 0;
407     old_fpu->last_cpu = -1;
408     if (fpu.preload) {
409     - new_fpu->counter++;
410     if (fpu_want_lazy_restore(new_fpu, cpu))
411     fpu.preload = 0;
412     else
413     diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
414     index 48df486b02f9..3c80f5b9c09d 100644
415     --- a/arch/x86/include/asm/fpu/types.h
416     +++ b/arch/x86/include/asm/fpu/types.h
417     @@ -321,17 +321,6 @@ struct fpu {
418     */
419     unsigned char fpregs_active;
420    
421     - /*
422     - * @counter:
423     - *
424     - * This counter contains the number of consecutive context switches
425     - * during which the FPU stays used. If this is over a threshold, the
426     - * lazy FPU restore logic becomes eager, to save the trap overhead.
427     - * This is an unsigned char so that after 256 iterations the counter
428     - * wraps and the context switch behavior turns lazy again; this is to
429     - * deal with bursty apps that only use the FPU for a short time:
430     - */
431     - unsigned char counter;
432     /*
433     * @state:
434     *
435     @@ -340,29 +329,6 @@ struct fpu {
436     * the registers in the FPU are more recent than this state
437     * copy. If the task context-switches away then they get
438     * saved here and represent the FPU state.
439     - *
440     - * After context switches there may be a (short) time period
441     - * during which the in-FPU hardware registers are unchanged
442     - * and still perfectly match this state, if the tasks
443     - * scheduled afterwards are not using the FPU.
444     - *
445     - * This is the 'lazy restore' window of optimization, which
446     - * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
447     - *
448     - * We detect whether a subsequent task uses the FPU via setting
449     - * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
450     - *
451     - * During this window, if the task gets scheduled again, we
452     - * might be able to skip having to do a restore from this
453     - * memory buffer to the hardware registers - at the cost of
454     - * incurring the overhead of #NM fault traps.
455     - *
456     - * Note that on modern CPUs that support the XSAVEOPT (or other
457     - * optimized XSAVE instructions), we don't use #NM traps anymore,
458     - * as the hardware can track whether FPU registers need saving
459     - * or not. On such CPUs we activate the non-lazy ('eagerfpu')
460     - * logic, which unconditionally saves/restores all FPU state
461     - * across context switches. (if FPU state exists.)
462     */
463     union fpregs_state state;
464     /*
465     diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
466     index 221a32ed1372..d5c4df98aac3 100644
467     --- a/arch/x86/include/asm/pgtable_64.h
468     +++ b/arch/x86/include/asm/pgtable_64.h
469     @@ -13,13 +13,14 @@
470     #include <asm/processor.h>
471     #include <linux/bitops.h>
472     #include <linux/threads.h>
473     +#include <asm/fixmap.h>
474    
475     extern pud_t level3_kernel_pgt[512];
476     extern pud_t level3_ident_pgt[512];
477     extern pmd_t level2_kernel_pgt[512];
478     extern pmd_t level2_fixmap_pgt[512];
479     extern pmd_t level2_ident_pgt[512];
480     -extern pte_t level1_fixmap_pgt[512];
481     +extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
482     extern pgd_t init_level4_pgt[];
483    
484     #define swapper_pg_dir init_level4_pgt
485     diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
486     index 9217ab1f5bf6..342e59789fcd 100644
487     --- a/arch/x86/include/asm/trace/fpu.h
488     +++ b/arch/x86/include/asm/trace/fpu.h
489     @@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
490     __field(struct fpu *, fpu)
491     __field(bool, fpregs_active)
492     __field(bool, fpstate_active)
493     - __field(int, counter)
494     __field(u64, xfeatures)
495     __field(u64, xcomp_bv)
496     ),
497     @@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
498     __entry->fpu = fpu;
499     __entry->fpregs_active = fpu->fpregs_active;
500     __entry->fpstate_active = fpu->fpstate_active;
501     - __entry->counter = fpu->counter;
502     if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
503     __entry->xfeatures = fpu->state.xsave.header.xfeatures;
504     __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
505     }
506     ),
507     - TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
508     + TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
509     __entry->fpu,
510     __entry->fpregs_active,
511     __entry->fpstate_active,
512     - __entry->counter,
513     __entry->xfeatures,
514     __entry->xcomp_bv
515     )
516     diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
517     index 430c095cfa0e..fc965118d2e6 100644
518     --- a/arch/x86/kernel/fpu/core.c
519     +++ b/arch/x86/kernel/fpu/core.c
520     @@ -59,27 +59,9 @@ static bool kernel_fpu_disabled(void)
521     return this_cpu_read(in_kernel_fpu);
522     }
523    
524     -/*
525     - * Were we in an interrupt that interrupted kernel mode?
526     - *
527     - * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
528     - * pair does nothing at all: the thread must not have fpu (so
529     - * that we don't try to save the FPU state), and TS must
530     - * be set (so that the clts/stts pair does nothing that is
531     - * visible in the interrupted kernel thread).
532     - *
533     - * Except for the eagerfpu case when we return true; in the likely case
534     - * the thread has FPU but we are not going to set/clear TS.
535     - */
536     static bool interrupted_kernel_fpu_idle(void)
537     {
538     - if (kernel_fpu_disabled())
539     - return false;
540     -
541     - if (use_eager_fpu())
542     - return true;
543     -
544     - return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
545     + return !kernel_fpu_disabled();
546     }
547    
548     /*
549     @@ -127,7 +109,6 @@ void __kernel_fpu_begin(void)
550     copy_fpregs_to_fpstate(fpu);
551     } else {
552     this_cpu_write(fpu_fpregs_owner_ctx, NULL);
553     - __fpregs_activate_hw();
554     }
555     }
556     EXPORT_SYMBOL(__kernel_fpu_begin);
557     @@ -138,8 +119,6 @@ void __kernel_fpu_end(void)
558    
559     if (fpu->fpregs_active)
560     copy_kernel_to_fpregs(&fpu->state);
561     - else
562     - __fpregs_deactivate_hw();
563    
564     kernel_fpu_enable();
565     }
566     @@ -201,10 +180,7 @@ void fpu__save(struct fpu *fpu)
567     trace_x86_fpu_before_save(fpu);
568     if (fpu->fpregs_active) {
569     if (!copy_fpregs_to_fpstate(fpu)) {
570     - if (use_eager_fpu())
571     - copy_kernel_to_fpregs(&fpu->state);
572     - else
573     - fpregs_deactivate(fpu);
574     + copy_kernel_to_fpregs(&fpu->state);
575     }
576     }
577     trace_x86_fpu_after_save(fpu);
578     @@ -249,7 +225,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
579    
580     int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
581     {
582     - dst_fpu->counter = 0;
583     dst_fpu->fpregs_active = 0;
584     dst_fpu->last_cpu = -1;
585    
586     @@ -262,8 +237,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
587     * Don't let 'init optimized' areas of the XSAVE area
588     * leak into the child task:
589     */
590     - if (use_eager_fpu())
591     - memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
592     + memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
593    
594     /*
595     * Save current FPU registers directly into the child
596     @@ -285,10 +259,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
597     memcpy(&src_fpu->state, &dst_fpu->state,
598     fpu_kernel_xstate_size);
599    
600     - if (use_eager_fpu())
601     - copy_kernel_to_fpregs(&src_fpu->state);
602     - else
603     - fpregs_deactivate(src_fpu);
604     + copy_kernel_to_fpregs(&src_fpu->state);
605     }
606     preempt_enable();
607    
608     @@ -461,7 +432,6 @@ void fpu__restore(struct fpu *fpu)
609     trace_x86_fpu_before_restore(fpu);
610     fpregs_activate(fpu);
611     copy_kernel_to_fpregs(&fpu->state);
612     - fpu->counter++;
613     trace_x86_fpu_after_restore(fpu);
614     kernel_fpu_enable();
615     }
616     @@ -479,7 +449,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
617     void fpu__drop(struct fpu *fpu)
618     {
619     preempt_disable();
620     - fpu->counter = 0;
621    
622     if (fpu->fpregs_active) {
623     /* Ignore delayed exceptions from user space */
624     diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
625     index 3ec0d2d64601..3a9318610c4d 100644
626     --- a/arch/x86/kernel/fpu/signal.c
627     +++ b/arch/x86/kernel/fpu/signal.c
628     @@ -344,11 +344,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
629     }
630    
631     fpu->fpstate_active = 1;
632     - if (use_eager_fpu()) {
633     - preempt_disable();
634     - fpu__restore(fpu);
635     - preempt_enable();
636     - }
637     + preempt_disable();
638     + fpu__restore(fpu);
639     + preempt_enable();
640    
641     return err;
642     } else {
643     diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
644     index abfbb61b18b8..e9d7f461b7fa 100644
645     --- a/arch/x86/kernel/fpu/xstate.c
646     +++ b/arch/x86/kernel/fpu/xstate.c
647     @@ -890,15 +890,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
648     */
649     if (!boot_cpu_has(X86_FEATURE_OSPKE))
650     return -EINVAL;
651     - /*
652     - * For most XSAVE components, this would be an arduous task:
653     - * brining fpstate up to date with fpregs, updating fpstate,
654     - * then re-populating fpregs. But, for components that are
655     - * never lazily managed, we can just access the fpregs
656     - * directly. PKRU is never managed lazily, so we can just
657     - * manipulate it directly. Make sure it stays that way.
658     - */
659     - WARN_ON_ONCE(!use_eager_fpu());
660    
661     /* Set the bits we need in PKRU: */
662     if (init_val & PKEY_DISABLE_ACCESS)
663     diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
664     index 9d72cf547c88..b0d6697ab153 100644
665     --- a/arch/x86/kernel/head_64.S
666     +++ b/arch/x86/kernel/head_64.S
667     @@ -23,6 +23,7 @@
668     #include "../entry/calling.h"
669     #include <asm/export.h>
670     #include <asm/nospec-branch.h>
671     +#include <asm/fixmap.h>
672    
673     #ifdef CONFIG_PARAVIRT
674     #include <asm/asm-offsets.h>
675     @@ -493,13 +494,20 @@ NEXT_PAGE(level2_kernel_pgt)
676     KERNEL_IMAGE_SIZE/PMD_SIZE)
677    
678     NEXT_PAGE(level2_fixmap_pgt)
679     - .fill 506,8,0
680     - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
681     - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
682     - .fill 5,8,0
683     + .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
684     + pgtno = 0
685     + .rept (FIXMAP_PMD_NUM)
686     + .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
687     + + _PAGE_TABLE;
688     + pgtno = pgtno + 1
689     + .endr
690     + /* 6 MB reserved space + a 2MB hole */
691     + .fill 4,8,0
692    
693     NEXT_PAGE(level1_fixmap_pgt)
694     + .rept (FIXMAP_PMD_NUM)
695     .fill 512,8,0
696     + .endr
697    
698     #undef PMDS
699    
700     diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
701     index 7e5119c1d15c..c17d3893ae60 100644
702     --- a/arch/x86/kvm/cpuid.c
703     +++ b/arch/x86/kvm/cpuid.c
704     @@ -16,7 +16,6 @@
705     #include <linux/export.h>
706     #include <linux/vmalloc.h>
707     #include <linux/uaccess.h>
708     -#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
709     #include <asm/user.h>
710     #include <asm/fpu/xstate.h>
711     #include "cpuid.h"
712     @@ -114,8 +113,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
713     if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
714     best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
715    
716     - if (use_eager_fpu())
717     - kvm_x86_ops->fpu_activate(vcpu);
718     + kvm_x86_ops->fpu_activate(vcpu);
719    
720     /*
721     * The existing code assumes virtual address is 48-bit in the canonical
722     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
723     index 203d42340fc1..5013ef165f44 100644
724     --- a/arch/x86/kvm/x86.c
725     +++ b/arch/x86/kvm/x86.c
726     @@ -7631,16 +7631,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
727     copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
728     __kernel_fpu_end();
729     ++vcpu->stat.fpu_reload;
730     - /*
731     - * If using eager FPU mode, or if the guest is a frequent user
732     - * of the FPU, just leave the FPU active for next time.
733     - * Every 255 times fpu_counter rolls over to 0; a guest that uses
734     - * the FPU in bursts will revert to loading it on demand.
735     - */
736     - if (!use_eager_fpu()) {
737     - if (++vcpu->fpu_counter < 5)
738     - kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
739     - }
740     trace_kvm_fpu(0);
741     }
742    
743     diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
744     index e30baa8ad94f..8cbed30feb67 100644
745     --- a/arch/x86/mm/pgtable.c
746     +++ b/arch/x86/mm/pgtable.c
747     @@ -536,6 +536,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
748     {
749     unsigned long address = __fix_to_virt(idx);
750    
751     +#ifdef CONFIG_X86_64
752     + /*
753     + * Ensure that the static initial page tables are covering the
754     + * fixmap completely.
755     + */
756     + BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
757     + (FIXMAP_PMD_NUM * PTRS_PER_PTE));
758     +#endif
759     +
760     if (idx >= __end_of_fixed_addresses) {
761     BUG();
762     return;
763     diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
764     index 0bbec041c003..e2d2b3cd4276 100644
765     --- a/arch/x86/mm/pkeys.c
766     +++ b/arch/x86/mm/pkeys.c
767     @@ -142,8 +142,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
768     * Called from the FPU code when creating a fresh set of FPU
769     * registers. This is called from a very specific context where
770     * we know the FPU regstiers are safe for use and we can use PKRU
771     - * directly. The fact that PKRU is only available when we are
772     - * using eagerfpu mode makes this possible.
773     + * directly.
774     */
775     void copy_init_pkru_to_fpregs(void)
776     {
777     diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
778     index c92f75f7ae33..ebceaba20ad1 100644
779     --- a/arch/x86/xen/mmu.c
780     +++ b/arch/x86/xen/mmu.c
781     @@ -1936,7 +1936,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
782     * L3_k[511] -> level2_fixmap_pgt */
783     convert_pfn_mfn(level3_kernel_pgt);
784    
785     - /* L3_k[511][506] -> level1_fixmap_pgt */
786     + /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
787     convert_pfn_mfn(level2_fixmap_pgt);
788     }
789     /* We get [511][511] and have Xen's version of level2_kernel_pgt */
790     @@ -1970,7 +1970,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
791     set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
792     set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
793     set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
794     - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
795     +
796     + for (i = 0; i < FIXMAP_PMD_NUM; i++) {
797     + set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
798     + PAGE_KERNEL_RO);
799     + }
800    
801     /* Pin down new L4 */
802     pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
803     diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
804     index dfffba39f723..98517216879d 100644
805     --- a/drivers/base/power/main.c
806     +++ b/drivers/base/power/main.c
807     @@ -1360,8 +1360,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
808    
809     dpm_wait_for_children(dev, async);
810    
811     - if (async_error)
812     + if (async_error) {
813     + dev->power.direct_complete = false;
814     goto Complete;
815     + }
816    
817     /*
818     * If a device configured to wake up the system from sleep states
819     @@ -1373,6 +1375,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
820     pm_wakeup_event(dev, 0);
821    
822     if (pm_wakeup_pending()) {
823     + dev->power.direct_complete = false;
824     async_error = -EBUSY;
825     goto Complete;
826     }
827     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
828     index 0fd0d82f80d2..fa9ef8ed5712 100644
829     --- a/drivers/infiniband/core/ucma.c
830     +++ b/drivers/infiniband/core/ucma.c
831     @@ -1720,6 +1720,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
832     mutex_lock(&mut);
833     if (!ctx->closing) {
834     mutex_unlock(&mut);
835     + ucma_put_ctx(ctx);
836     + wait_for_completion(&ctx->comp);
837     /* rdma_destroy_id ensures that no event handlers are
838     * inflight for that id before releasing it.
839     */
840     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
841     index a184c9830ca5..62eb4b7caff3 100644
842     --- a/drivers/md/dm-cache-metadata.c
843     +++ b/drivers/md/dm-cache-metadata.c
844     @@ -1262,8 +1262,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
845     if (hints_valid) {
846     r = dm_array_cursor_next(&cmd->hint_cursor);
847     if (r) {
848     - DMERR("dm_array_cursor_next for hint failed");
849     - goto out;
850     + dm_array_cursor_end(&cmd->hint_cursor);
851     + hints_valid = false;
852     }
853     }
854     }
855     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
856     index c817627d09ca..58b97226050f 100644
857     --- a/drivers/md/dm-cache-target.c
858     +++ b/drivers/md/dm-cache-target.c
859     @@ -3390,8 +3390,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
860    
861     static bool can_resize(struct cache *cache, dm_cblock_t new_size)
862     {
863     - if (from_cblock(new_size) > from_cblock(cache->cache_size))
864     - return true;
865     + if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
866     + if (cache->sized) {
867     + DMERR("%s: unable to extend cache due to missing cache table reload",
868     + cache_device_name(cache));
869     + return false;
870     + }
871     + }
872    
873     /*
874     * We can't drop a dirty block when shrinking the cache.
875     diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
876     index 0dadc6044dba..b106a06d21cb 100644
877     --- a/drivers/net/wireless/ath/ath10k/debug.c
878     +++ b/drivers/net/wireless/ath/ath10k/debug.c
879     @@ -1,6 +1,7 @@
880     /*
881     * Copyright (c) 2005-2011 Atheros Communications Inc.
882     * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
883     + * Copyright (c) 2018, The Linux Foundation. All rights reserved.
884     *
885     * Permission to use, copy, modify, and/or distribute this software for any
886     * purpose with or without fee is hereby granted, provided that the above
887     @@ -161,6 +162,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
888     void ath10k_debug_print_board_info(struct ath10k *ar)
889     {
890     char boardinfo[100];
891     + const struct firmware *board;
892     + u32 crc;
893    
894     if (ar->id.bmi_ids_valid)
895     scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
896     @@ -168,11 +171,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
897     else
898     scnprintf(boardinfo, sizeof(boardinfo), "N/A");
899    
900     + board = ar->normal_mode_fw.board;
901     + if (!IS_ERR_OR_NULL(board))
902     + crc = crc32_le(0, board->data, board->size);
903     + else
904     + crc = 0;
905     +
906     ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
907     ar->bd_api,
908     boardinfo,
909     - crc32_le(0, ar->normal_mode_fw.board->data,
910     - ar->normal_mode_fw.board->size));
911     + crc);
912     }
913    
914     void ath10k_debug_print_boot_info(struct ath10k *ar)
915     diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
916     index e0d00cef0bd8..5b974bb76e6c 100644
917     --- a/drivers/net/wireless/ath/ath10k/trace.h
918     +++ b/drivers/net/wireless/ath/ath10k/trace.h
919     @@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
920     );
921    
922     TRACE_EVENT(ath10k_wmi_cmd,
923     - TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
924     - int ret),
925     + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
926    
927     - TP_ARGS(ar, id, buf, buf_len, ret),
928     + TP_ARGS(ar, id, buf, buf_len),
929    
930     TP_STRUCT__entry(
931     __string(device, dev_name(ar->dev))
932     @@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
933     __field(unsigned int, id)
934     __field(size_t, buf_len)
935     __dynamic_array(u8, buf, buf_len)
936     - __field(int, ret)
937     ),
938    
939     TP_fast_assign(
940     @@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
941     __assign_str(driver, dev_driver_string(ar->dev));
942     __entry->id = id;
943     __entry->buf_len = buf_len;
944     - __entry->ret = ret;
945     memcpy(__get_dynamic_array(buf), buf, buf_len);
946     ),
947    
948     TP_printk(
949     - "%s %s id %d len %zu ret %d",
950     + "%s %s id %d len %zu",
951     __get_str(driver),
952     __get_str(device),
953     __entry->id,
954     - __entry->buf_len,
955     - __entry->ret
956     + __entry->buf_len
957     )
958     );
959    
960     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
961     index f69b98f4276b..642a441a6586 100644
962     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
963     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
964     @@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
965     bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
966     ie_len = roundup(arg->ie_len, 4);
967     len = (sizeof(*tlv) + sizeof(*cmd)) +
968     - (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
969     - (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
970     - (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
971     - (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
972     + sizeof(*tlv) + chan_len +
973     + sizeof(*tlv) + ssid_len +
974     + sizeof(*tlv) + bssid_len +
975     + sizeof(*tlv) + ie_len;
976    
977     skb = ath10k_wmi_alloc_skb(ar, len);
978     if (!skb)
979     diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
980     index e518b640aad0..75f7a7b549df 100644
981     --- a/drivers/net/wireless/ath/ath10k/wmi.c
982     +++ b/drivers/net/wireless/ath/ath10k/wmi.c
983     @@ -1711,8 +1711,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
984     cmd_hdr->cmd_id = __cpu_to_le32(cmd);
985    
986     memset(skb_cb, 0, sizeof(*skb_cb));
987     + trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
988     ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
989     - trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
990    
991     if (ret)
992     goto err_pull;
993     diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
994     index 3c4c58b9fe76..3b6fb5b3bdb2 100644
995     --- a/drivers/net/xen-netback/hash.c
996     +++ b/drivers/net/xen-netback/hash.c
997     @@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
998     u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
999     u32 off)
1000     {
1001     - u32 *mapping = &vif->hash.mapping[off];
1002     + u32 *mapping = vif->hash.mapping;
1003     struct gnttab_copy copy_op = {
1004     .source.u.ref = gref,
1005     .source.domid = vif->domid,
1006     - .dest.u.gmfn = virt_to_gfn(mapping),
1007     .dest.domid = DOMID_SELF,
1008     - .dest.offset = xen_offset_in_page(mapping),
1009     - .len = len * sizeof(u32),
1010     + .len = len * sizeof(*mapping),
1011     .flags = GNTCOPY_source_gref
1012     };
1013    
1014     - if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
1015     + if ((off + len < off) || (off + len > vif->hash.size) ||
1016     + len > XEN_PAGE_SIZE / sizeof(*mapping))
1017     return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
1018    
1019     + copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
1020     + copy_op.dest.offset = xen_offset_in_page(mapping + off);
1021     +
1022     while (len-- != 0)
1023     if (mapping[off++] >= vif->num_queues)
1024     return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
1025     diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
1026     index 90b5a898d6b1..0a1ebbbd3f16 100644
1027     --- a/drivers/of/unittest.c
1028     +++ b/drivers/of/unittest.c
1029     @@ -548,6 +548,9 @@ static void __init of_unittest_parse_interrupts(void)
1030     struct of_phandle_args args;
1031     int i, rc;
1032    
1033     + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
1034     + return;
1035     +
1036     np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
1037     if (!np) {
1038     pr_err("missing testcase data\n");
1039     @@ -622,6 +625,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
1040     struct of_phandle_args args;
1041     int i, rc;
1042    
1043     + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
1044     + return;
1045     +
1046     np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
1047     if (!np) {
1048     pr_err("missing testcase data\n");
1049     @@ -778,15 +784,19 @@ static void __init of_unittest_platform_populate(void)
1050     pdev = of_find_device_by_node(np);
1051     unittest(pdev, "device 1 creation failed\n");
1052    
1053     - irq = platform_get_irq(pdev, 0);
1054     - unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
1055     -
1056     - /* Test that a parsing failure does not return -EPROBE_DEFER */
1057     - np = of_find_node_by_path("/testcase-data/testcase-device2");
1058     - pdev = of_find_device_by_node(np);
1059     - unittest(pdev, "device 2 creation failed\n");
1060     - irq = platform_get_irq(pdev, 0);
1061     - unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
1062     + if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
1063     + irq = platform_get_irq(pdev, 0);
1064     + unittest(irq == -EPROBE_DEFER,
1065     + "device deferred probe failed - %d\n", irq);
1066     +
1067     + /* Test that a parsing failure does not return -EPROBE_DEFER */
1068     + np = of_find_node_by_path("/testcase-data/testcase-device2");
1069     + pdev = of_find_device_by_node(np);
1070     + unittest(pdev, "device 2 creation failed\n");
1071     + irq = platform_get_irq(pdev, 0);
1072     + unittest(irq < 0 && irq != -EPROBE_DEFER,
1073     + "device parsing error failed - %d\n", irq);
1074     + }
1075    
1076     np = of_find_node_by_path("/testcase-data/platform-tests");
1077     unittest(np, "No testcase data in device tree\n");
1078     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1079     index 6b3c5c4cbb37..ccbbd4cde0f1 100644
1080     --- a/drivers/pci/pci.c
1081     +++ b/drivers/pci/pci.c
1082     @@ -1114,12 +1114,12 @@ int pci_save_state(struct pci_dev *dev)
1083     EXPORT_SYMBOL(pci_save_state);
1084    
1085     static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1086     - u32 saved_val, int retry)
1087     + u32 saved_val, int retry, bool force)
1088     {
1089     u32 val;
1090    
1091     pci_read_config_dword(pdev, offset, &val);
1092     - if (val == saved_val)
1093     + if (!force && val == saved_val)
1094     return;
1095    
1096     for (;;) {
1097     @@ -1138,25 +1138,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1098     }
1099    
1100     static void pci_restore_config_space_range(struct pci_dev *pdev,
1101     - int start, int end, int retry)
1102     + int start, int end, int retry,
1103     + bool force)
1104     {
1105     int index;
1106    
1107     for (index = end; index >= start; index--)
1108     pci_restore_config_dword(pdev, 4 * index,
1109     pdev->saved_config_space[index],
1110     - retry);
1111     + retry, force);
1112     }
1113    
1114     static void pci_restore_config_space(struct pci_dev *pdev)
1115     {
1116     if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1117     - pci_restore_config_space_range(pdev, 10, 15, 0);
1118     + pci_restore_config_space_range(pdev, 10, 15, 0, false);
1119     /* Restore BARs before the command register. */
1120     - pci_restore_config_space_range(pdev, 4, 9, 10);
1121     - pci_restore_config_space_range(pdev, 0, 3, 0);
1122     + pci_restore_config_space_range(pdev, 4, 9, 10, false);
1123     + pci_restore_config_space_range(pdev, 0, 3, 0, false);
1124     + } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1125     + pci_restore_config_space_range(pdev, 12, 15, 0, false);
1126     +
1127     + /*
1128     + * Force rewriting of prefetch registers to avoid S3 resume
1129     + * issues on Intel PCI bridges that occur when these
1130     + * registers are not explicitly written.
1131     + */
1132     + pci_restore_config_space_range(pdev, 9, 11, 0, true);
1133     + pci_restore_config_space_range(pdev, 0, 8, 0, false);
1134     } else {
1135     - pci_restore_config_space_range(pdev, 0, 15, 0);
1136     + pci_restore_config_space_range(pdev, 0, 15, 0, false);
1137     }
1138     }
1139    
1140     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
1141     index 789c81482542..e6429d419b80 100644
1142     --- a/drivers/tty/tty_io.c
1143     +++ b/drivers/tty/tty_io.c
1144     @@ -1475,6 +1475,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1145     static int tty_reopen(struct tty_struct *tty)
1146     {
1147     struct tty_driver *driver = tty->driver;
1148     + int retval;
1149    
1150     if (driver->type == TTY_DRIVER_TYPE_PTY &&
1151     driver->subtype == PTY_TYPE_MASTER)
1152     @@ -1488,10 +1489,14 @@ static int tty_reopen(struct tty_struct *tty)
1153    
1154     tty->count++;
1155    
1156     - if (!tty->ldisc)
1157     - return tty_ldisc_reinit(tty, tty->termios.c_line);
1158     + if (tty->ldisc)
1159     + return 0;
1160    
1161     - return 0;
1162     + retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1163     + if (retval)
1164     + tty->count--;
1165     +
1166     + return retval;
1167     }
1168    
1169     /**
1170     diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
1171     index ce9e457e60c3..c10875834a5a 100644
1172     --- a/drivers/usb/host/xhci-mtk.c
1173     +++ b/drivers/usb/host/xhci-mtk.c
1174     @@ -735,10 +735,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
1175     xhci_mtk_host_enable(mtk);
1176    
1177     xhci_dbg(xhci, "%s: restart port polling\n", __func__);
1178     - set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1179     - usb_hcd_poll_rh_status(hcd);
1180     set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1181     usb_hcd_poll_rh_status(xhci->shared_hcd);
1182     + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1183     + usb_hcd_poll_rh_status(hcd);
1184     return 0;
1185     }
1186    
1187     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1188     index f6782a347cde..b5140555a8d5 100644
1189     --- a/drivers/usb/host/xhci-pci.c
1190     +++ b/drivers/usb/host/xhci-pci.c
1191     @@ -179,6 +179,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1192     }
1193     if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1194     (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
1195     + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
1196     + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
1197     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
1198     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
1199     xhci->quirks |= XHCI_MISSING_CAS;
1200     diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1201     index 2674da40d9cd..6d6acf2c07c3 100644
1202     --- a/drivers/usb/serial/usb-serial-simple.c
1203     +++ b/drivers/usb/serial/usb-serial-simple.c
1204     @@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
1205    
1206     /* Motorola Tetra driver */
1207     #define MOTOROLA_TETRA_IDS() \
1208     - { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1209     + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
1210     + { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
1211     DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1212    
1213     /* Novatel Wireless GPS driver */
1214     diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1215     index ef69273074ba..a3edb20ea4c3 100644
1216     --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1217     +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1218     @@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
1219     if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
1220     return -EFAULT;
1221    
1222     + if (mr->w > 4096 || mr->h > 4096)
1223     + return -EINVAL;
1224     +
1225     if (mr->w * mr->h * 3 > mr->buffer_size)
1226     return -EINVAL;
1227    
1228     @@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
1229     mr->x, mr->y, mr->w, mr->h);
1230    
1231     if (r > 0) {
1232     - if (copy_to_user(mr->buffer, buf, mr->buffer_size))
1233     + if (copy_to_user(mr->buffer, buf, r))
1234     r = -EFAULT;
1235     }
1236    
1237     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1238     index c19c96840480..c10180d0b018 100644
1239     --- a/fs/ext4/xattr.c
1240     +++ b/fs/ext4/xattr.c
1241     @@ -209,12 +209,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
1242     {
1243     int error;
1244    
1245     - if (buffer_verified(bh))
1246     - return 0;
1247     -
1248     if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1249     BHDR(bh)->h_blocks != cpu_to_le32(1))
1250     return -EFSCORRUPTED;
1251     + if (buffer_verified(bh))
1252     + return 0;
1253     +
1254     if (!ext4_xattr_block_csum_verify(inode, bh))
1255     return -EFSBADCRC;
1256     error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
1257     @@ -645,14 +645,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
1258     }
1259    
1260     static int
1261     -ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
1262     +ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
1263     + struct inode *inode)
1264     {
1265     - struct ext4_xattr_entry *last;
1266     + struct ext4_xattr_entry *last, *next;
1267     size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
1268    
1269     /* Compute min_offs and last. */
1270     last = s->first;
1271     - for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1272     + for (; !IS_LAST_ENTRY(last); last = next) {
1273     + next = EXT4_XATTR_NEXT(last);
1274     + if ((void *)next >= s->end) {
1275     + EXT4_ERROR_INODE(inode, "corrupted xattr entries");
1276     + return -EIO;
1277     + }
1278     if (last->e_value_size) {
1279     size_t offs = le16_to_cpu(last->e_value_offs);
1280     if (offs < min_offs)
1281     @@ -834,7 +840,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1282     mb_cache_entry_delete_block(ext4_mb_cache, hash,
1283     bs->bh->b_blocknr);
1284     ea_bdebug(bs->bh, "modifying in-place");
1285     - error = ext4_xattr_set_entry(i, s);
1286     + error = ext4_xattr_set_entry(i, s, inode);
1287     if (!error) {
1288     if (!IS_LAST_ENTRY(s->first))
1289     ext4_xattr_rehash(header(s->base),
1290     @@ -881,7 +887,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1291     s->end = s->base + sb->s_blocksize;
1292     }
1293    
1294     - error = ext4_xattr_set_entry(i, s);
1295     + error = ext4_xattr_set_entry(i, s, inode);
1296     if (error == -EFSCORRUPTED)
1297     goto bad_block;
1298     if (error)
1299     @@ -1079,7 +1085,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1300    
1301     if (EXT4_I(inode)->i_extra_isize == 0)
1302     return -ENOSPC;
1303     - error = ext4_xattr_set_entry(i, s);
1304     + error = ext4_xattr_set_entry(i, s, inode);
1305     if (error) {
1306     if (error == -ENOSPC &&
1307     ext4_has_inline_data(inode)) {
1308     @@ -1091,7 +1097,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
1309     error = ext4_xattr_ibody_find(inode, i, is);
1310     if (error)
1311     return error;
1312     - error = ext4_xattr_set_entry(i, s);
1313     + error = ext4_xattr_set_entry(i, s, inode);
1314     }
1315     if (error)
1316     return error;
1317     @@ -1117,7 +1123,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
1318    
1319     if (EXT4_I(inode)->i_extra_isize == 0)
1320     return -ENOSPC;
1321     - error = ext4_xattr_set_entry(i, s);
1322     + error = ext4_xattr_set_entry(i, s, inode);
1323     if (error)
1324     return error;
1325     header = IHDR(inode, ext4_raw_inode(&is->iloc));
1326     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1327     index b4dbc2f59656..aee2a066a446 100644
1328     --- a/fs/f2fs/checkpoint.c
1329     +++ b/fs/f2fs/checkpoint.c
1330     @@ -676,6 +676,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1331    
1332     crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
1333     if (crc_offset >= blk_size) {
1334     + f2fs_put_page(*cp_page, 1);
1335     f2fs_msg(sbi->sb, KERN_WARNING,
1336     "invalid crc_offset: %zu", crc_offset);
1337     return -EINVAL;
1338     @@ -684,6 +685,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1339     crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
1340     + crc_offset)));
1341     if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
1342     + f2fs_put_page(*cp_page, 1);
1343     f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
1344     return -EINVAL;
1345     }
1346     @@ -703,14 +705,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1347     err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1348     &cp_page_1, version);
1349     if (err)
1350     - goto invalid_cp1;
1351     + return NULL;
1352     pre_version = *version;
1353    
1354     cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1355     err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1356     &cp_page_2, version);
1357     if (err)
1358     - goto invalid_cp2;
1359     + goto invalid_cp;
1360     cur_version = *version;
1361    
1362     if (cur_version == pre_version) {
1363     @@ -718,9 +720,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1364     f2fs_put_page(cp_page_2, 1);
1365     return cp_page_1;
1366     }
1367     -invalid_cp2:
1368     f2fs_put_page(cp_page_2, 1);
1369     -invalid_cp1:
1370     +invalid_cp:
1371     f2fs_put_page(cp_page_1, 1);
1372     return NULL;
1373     }
1374     diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
1375     index 03dda1cbe485..727a9e3fa806 100644
1376     --- a/fs/ubifs/super.c
1377     +++ b/fs/ubifs/super.c
1378     @@ -1918,6 +1918,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1379     int dev, vol;
1380     char *endptr;
1381    
1382     + if (!name || !*name)
1383     + return ERR_PTR(-EINVAL);
1384     +
1385     /* First, try to open using the device node path method */
1386     ubi = ubi_open_volume_path(name, mode);
1387     if (!IS_ERR(ubi))
1388     diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
1389     index 984b2112c77b..ea8a97793d2d 100644
1390     --- a/include/linux/netfilter_bridge/ebtables.h
1391     +++ b/include/linux/netfilter_bridge/ebtables.h
1392     @@ -123,4 +123,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
1393     /* True if the target is not a standard target */
1394     #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
1395    
1396     +static inline bool ebt_invalid_target(int target)
1397     +{
1398     + return (target < -NUM_STANDARD_TARGETS || target >= 0);
1399     +}
1400     +
1401     #endif
1402     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1403     index 4c233437ee1a..bb0cf1caf1cd 100644
1404     --- a/kernel/cgroup.c
1405     +++ b/kernel/cgroup.c
1406     @@ -4386,7 +4386,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
1407     */
1408     do {
1409     css_task_iter_start(&from->self, &it);
1410     - task = css_task_iter_next(&it);
1411     +
1412     + do {
1413     + task = css_task_iter_next(&it);
1414     + } while (task && (task->flags & PF_EXITING));
1415     +
1416     if (task)
1417     get_task_struct(task);
1418     css_task_iter_end(&it);
1419     diff --git a/mm/vmstat.c b/mm/vmstat.c
1420     index 5f658b6a684f..d31e801a467c 100644
1421     --- a/mm/vmstat.c
1422     +++ b/mm/vmstat.c
1423     @@ -1078,6 +1078,9 @@ const char * const vmstat_text[] = {
1424     #ifdef CONFIG_SMP
1425     "nr_tlb_remote_flush",
1426     "nr_tlb_remote_flush_received",
1427     +#else
1428     + "", /* nr_tlb_remote_flush */
1429     + "", /* nr_tlb_remote_flush_received */
1430     #endif /* CONFIG_SMP */
1431     "nr_tlb_local_flush_all",
1432     "nr_tlb_local_flush_one",
1433     diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
1434     index 070cf134a22f..f2660c1b29e4 100644
1435     --- a/net/bridge/netfilter/ebt_arpreply.c
1436     +++ b/net/bridge/netfilter/ebt_arpreply.c
1437     @@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
1438     if (e->ethproto != htons(ETH_P_ARP) ||
1439     e->invflags & EBT_IPROTO)
1440     return -EINVAL;
1441     + if (ebt_invalid_target(info->target))
1442     + return -EINVAL;
1443     +
1444     return 0;
1445     }
1446    
1447     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
1448     index e63fd12f923a..6ef9d32c34f1 100644
1449     --- a/net/mac80211/cfg.c
1450     +++ b/net/mac80211/cfg.c
1451     @@ -386,7 +386,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1452     case NL80211_IFTYPE_AP:
1453     case NL80211_IFTYPE_AP_VLAN:
1454     /* Keys without a station are used for TX only */
1455     - if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
1456     + if (sta && test_sta_flag(sta, WLAN_STA_MFP))
1457     key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
1458     break;
1459     case NL80211_IFTYPE_ADHOC:
1460     diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
1461     index fbc1474960e3..f6d1bc93589c 100644
1462     --- a/tools/arch/x86/include/asm/cpufeatures.h
1463     +++ b/tools/arch/x86/include/asm/cpufeatures.h
1464     @@ -104,7 +104,6 @@
1465     #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
1466     #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
1467     #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
1468     -/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
1469     #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
1470    
1471     /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */