Magellan Linux

/trunk/kernel-magellan/patches-4.13/0111-4.13.12-all-fixes.patch

Revision 3022 - Thu Nov 16 11:09:07 2017 UTC by niro
File size: 46135 byte(s)
-linux-4.13.12

diff --git a/Makefile b/Makefile
index 8280953c8a45..a7c847f495b0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 13
-SUBLEVEL = 11
+SUBLEVEL = 12
 EXTRAVERSION =
 NAME = Fearless Coyote
 
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 50c5e8417802..10b99530280a 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -178,9 +178,9 @@
 reg = <0x8000 0x1000>;
 cache-unified;
 cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
 arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
 prefetch-data = <1>;
 };
 
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index af31f5d6c0e5..c3448622e79e 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,9 +143,9 @@
 reg = <0x8000 0x1000>;
 cache-unified;
 cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
 arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
 prefetch-data = <1>;
 };
 
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index 60fbfd5907c7..55d02641d930 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -111,9 +111,9 @@
 reg = <0x8000 0x1000>;
 cache-unified;
 cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
 arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
 prefetch-data = <1>;
 };
 
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 721ab5ecfb9b..0f2c8a2a8131 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += simd.h
 generic-y += sizes.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += unaligned.h
 
 generated-y += mach-types.h
 generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 000000000000..ab905ffcf193
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86a2c87..30a13647c54c 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 u32 return_offset = (is_thumb) ? 2 : 4;
 
 kvm_update_psr(vcpu, UND_MODE);
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 /* Branch to exception vector */
 *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_thumb = (cpsr & PSR_T_BIT);
 u32 vect_offset;
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = (is_pabt) ? 4 : 8;
 bool is_lpae;
 
 kvm_update_psr(vcpu, ABT_MODE);
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8679405b0b2b..92eab1d51785 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8a62648848e5..c99ffd8dce27 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -116,7 +116,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
 for (i = -4; i < 1; i++) {
 unsigned int val, bad;
 
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
 
 if (!bad)
 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 14c4e3b14bcb..48b03547a969 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..3556715a774e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,12 +33,26 @@
 #define LOWER_EL_AArch64_VECTOR 0x400
 #define LOWER_EL_AArch32_VECTOR 0x600
 
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+ [0] = { 0, 0 }, /* Reset, unused */
+ [1] = { 4, 2 }, /* Undefined */
+ [2] = { 0, 0 }, /* SVC, unused */
+ [3] = { 4, 4 }, /* Prefetch abort */
+ [4] = { 8, 8 }, /* Data abort */
+ [5] = { 0, 0 }, /* HVC, unused */
+ [6] = { 4, 4 }, /* IRQ, unused */
+ [7] = { 4, 4 }, /* FIQ, unused */
+};
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 unsigned long cpsr;
 unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
 bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
 cpsr = mode | COMPAT_PSR_I_BIT;
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 76923349b4fe..797da807916f 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -19,7 +19,7 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6bace7695788..20d7bc5f0eb5 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
 
 /*
@@ -376,6 +377,12 @@ asmlinkage void start_secondary(void)
 cpumask_set_cpu(cpu, &cpu_coherent_mask);
 notify_cpu_starting(cpu);
 
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
+ synchronise_count_slave(cpu);
+
+ /* The CPU is running and counters synchronised, now mark it online */
 set_cpu_online(cpu, true);
 
 set_cpu_sibling_map(cpu);
@@ -383,8 +390,11 @@ asmlinkage void start_secondary(void)
 
 calculate_cpu_foreign_map();
 
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
 complete(&cpu_running);
- synchronise_count_slave(cpu);
 
 /*
 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -443,17 +453,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 mp_ops->boot_secondary(cpu, tidle);
 
- /*
- * We must check for timeout here, as the CPU will not be marked
- * online until the counters are synchronised.
- */
- if (!wait_for_completion_timeout(&cpu_running,
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
 msecs_to_jiffies(1000))) {
 pr_crit("CPU%u: failed to start\n", cpu);
 return -EIO;
 }
 
 synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
 return 0;
 }
 
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index c28ff53c8da0..cdb5a191b9d5 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
 [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
 [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 [insn_ld] = {0, 0},
- [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
+ [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
 [insn_lld] = {0, 0},
 [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 401776f92288..e45f05cc510d 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1485,7 +1485,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 }
 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
 if (src < 0)
- return dst;
+ return src;
 if (BPF_MODE(insn->code) == BPF_XADD) {
 switch (BPF_SIZE(insn->code)) {
 case BPF_W:
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 5482928eea1b..abef812de7f8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -83,16 +83,8 @@ static inline unsigned long ppc_function_entry(void *func)
 * On PPC64 ABIv1 the function pointer actually points to the
 * function's descriptor. The first entry in the descriptor is the
 * address of the function text.
- *
- * However, we may also receive pointer to an assembly symbol. To
- * detect that, we first check if the function pointer we receive
- * already points to kernel/module text and we only dereference it
- * if it doesn't.
 */
- if (kernel_text_address((unsigned long)func))
- return (unsigned long)func;
- else
- return ((func_descr_t *)func)->entry;
+ return ((func_descr_t *)func)->entry;
 #else
 return (unsigned long)func;
 #endif
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 367494dc67d9..bebc3007a793 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -600,7 +600,12 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
 
 unsigned long arch_deref_entry_point(void *entry)
 {
- return ppc_global_function_entry(entry);
+#ifdef PPC64_ELF_ABI_v1
+ if (!kernel_text_address((unsigned long)entry))
+ return ppc_global_function_entry(entry);
+ else
+#endif
+ return (unsigned long)entry;
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index cdf82492b770..836877e2da22 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -21,7 +21,7 @@ obj-y += common.o
 obj-y += rdrand.o
 obj-y += match.o
 obj-y += bugs.o
-obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
+obj-y += aperfmperf.o
 
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 0ee83321a313..957813e0180d 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
 s64 time_delta = ktime_ms_delta(now, s->time);
 unsigned long flags;
 
- /* Don't bother re-computing within the cache threshold time. */
- if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return;
-
 local_irq_save(flags);
 rdmsrl(MSR_IA32_APERF, aperf);
 rdmsrl(MSR_IA32_MPERF, mperf);
@@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void *dummy)
 
 unsigned int arch_freq_get_on_cpu(int cpu)
 {
+ s64 time_delta;
 unsigned int khz;
 
 if (!cpu_khz)
@@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cpu)
 if (!static_cpu_has(X86_FEATURE_APERFMPERF))
 return 0;
 
+ /* Don't bother re-computing within the cache threshold time. */
+ time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
+ khz = per_cpu(samples.khz, cpu);
+ if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return khz;
+
 smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
 khz = per_cpu(samples.khz, cpu);
 if (khz)
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 10cec43aac38..7f85b76f43bc 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 static char mce_helper[128];
 static char *mce_helper_argv[2] = { mce_helper, NULL };
 
-#define mce_log_get_idx_check(p) \
-({ \
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- !lockdep_is_held(&mce_chrdev_read_mutex), \
- "suspicious mce_log_get_idx_check() usage"); \
- smp_load_acquire(&(p)); \
-})
-
 /*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
 void *data)
 {
 struct mce *mce = (struct mce *)data;
- unsigned int next, entry;
-
- wmb();
- for (;;) {
- entry = mce_log_get_idx_check(mcelog.next);
- for (;;) {
-
- /*
- * When the buffer fills up discard new entries.
- * Assume that the earlier errors are the more
- * interesting ones:
- */
- if (entry >= MCE_LOG_LEN) {
- set_bit(MCE_OVERFLOW,
- (unsigned long *)&mcelog.flags);
- return NOTIFY_OK;
- }
- /* Old left over entry. Skip: */
- if (mcelog.entry[entry].finished) {
- entry++;
- continue;
- }
- break;
- }
- smp_rmb();
- next = entry + 1;
- if (cmpxchg(&mcelog.next, entry, next) == entry)
- break;
+ unsigned int entry;
+
+ mutex_lock(&mce_chrdev_read_mutex);
+
+ entry = mcelog.next;
+
+ /*
+ * When the buffer fills up discard new entries. Assume that the
+ * earlier errors are the more interesting ones:
+ */
+ if (entry >= MCE_LOG_LEN) {
+ set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
+ goto unlock;
 }
+
+ mcelog.next = entry + 1;
+
 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
- wmb();
 mcelog.entry[entry].finished = 1;
- wmb();
 
 /* wake processes polling /dev/mcelog */
 wake_up_interruptible(&mce_chrdev_wait);
 
+unlock:
+ mutex_unlock(&mce_chrdev_read_mutex);
+
 return NOTIFY_OK;
 }
 
@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
 return 0;
 }
 
-static void collect_tscs(void *data)
-{
- unsigned long *cpu_tsc = (unsigned long *)data;
-
- cpu_tsc[smp_processor_id()] = rdtsc();
-}
-
 static int mce_apei_read_done;
 
 /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
 size_t usize, loff_t *off)
 {
 char __user *buf = ubuf;
- unsigned long *cpu_tsc;
- unsigned prev, next;
+ unsigned next;
 int i, err;
 
- cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
- if (!cpu_tsc)
- return -ENOMEM;
-
 mutex_lock(&mce_chrdev_read_mutex);
 
 if (!mce_apei_read_done) {
@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
 goto out;
 }
 
- next = mce_log_get_idx_check(mcelog.next);
-
 /* Only supports full reads right now */
 err = -EINVAL;
 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
 goto out;
 
+ next = mcelog.next;
 err = 0;
- prev = 0;
- do {
- for (i = prev; i < next; i++) {
- unsigned long start = jiffies;
- struct mce *m = &mcelog.entry[i];
-
- while (!m->finished) {
- if (time_after_eq(jiffies, start + 2)) {
- memset(m, 0, sizeof(*m));
- goto timeout;
- }
- cpu_relax();
- }
- smp_rmb();
- err |= copy_to_user(buf, m, sizeof(*m));
- buf += sizeof(*m);
-timeout:
- ;
- }
-
- memset(mcelog.entry + prev, 0,
- (next - prev) * sizeof(struct mce));
- prev = next;
- next = cmpxchg(&mcelog.next, prev, 0);
- } while (next != prev);
-
- synchronize_sched();
 
- /*
- * Collect entries that were still getting written before the
- * synchronize.
- */
- on_each_cpu(collect_tscs, cpu_tsc, 1);
-
- for (i = next; i < MCE_LOG_LEN; i++) {
+ for (i = 0; i < next; i++) {
 struct mce *m = &mcelog.entry[i];
 
- if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
- err |= copy_to_user(buf, m, sizeof(*m));
- smp_rmb();
- buf += sizeof(*m);
- memset(m, 0, sizeof(*m));
- }
+ err |= copy_to_user(buf, m, sizeof(*m));
+ buf += sizeof(*m);
 }
 
+ memset(mcelog.entry, 0, next * sizeof(struct mce));
+ mcelog.next = 0;
+
 if (err)
 err = -EFAULT;
 
 out:
 mutex_unlock(&mce_chrdev_read_mutex);
- kfree(cpu_tsc);
 
 return err ? err : buf - ubuf;
 }
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 218f79825b3c..510e69596278 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -2,6 +2,7 @@
 #include <linux/timex.h>
 #include <linux/string.h>
 #include <linux/seq_file.h>
+#include <linux/cpufreq.h>
 
 /*
 * Get CPU information for use by the procfs.
@@ -75,9 +76,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 if (c->microcode)
 seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
 
- if (cpu_has(c, X86_FEATURE_TSC))
+ if (cpu_has(c, X86_FEATURE_TSC)) {
+ unsigned int freq = arch_freq_get_on_cpu(cpu);
+
+ if (!freq)
+ freq = cpufreq_quick_get(cpu);
+ if (!freq)
+ freq = cpu_khz;
 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
- cpu_khz / 1000, (cpu_khz % 1000));
+ freq / 1000, (freq % 1000));
+ }
 
 /* Cache size */
 if (c->x86_cache_size >= 0)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d3d5523862c2..b49952b5a189 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
 }
 
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ scsi_req_init(&vbr->sreq);
+}
+#endif
+
 static const struct blk_mq_ops virtio_mq_ops = {
 .queue_rq = virtio_queue_rq,
 .complete = virtblk_request_done,
 .init_request = virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ .initialize_rq_fn = virtblk_initialize_rq,
+#endif
 .map_queues = virtblk_map_queues,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..1086cf86354f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -93,6 +93,10 @@ static int uvd_v6_0_early_init(void *handle)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
+
 uvd_v6_0_set_ring_funcs(adev);
 uvd_v6_0_set_irq_funcs(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 90332f55cfba..cf81065e3c5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 {
 u32 tmp;
 
- /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
 if ((adev->asic_type == CHIP_FIJI) ||
- (adev->asic_type == CHIP_STONEY) ||
- (adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12))
+ (adev->asic_type == CHIP_STONEY))
 return AMDGPU_VCE_HARVEST_VCE1;
 
- /* Tonga and CZ are dual or single pipe */
 if (adev->flags & AMD_IS_APU)
 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
 VCE_HARVEST_FUSE_MACRO__MASK) >>
@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 case 3:
 return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
 default:
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
+ return AMDGPU_VCE_HARVEST_VCE1;
+
 return 0;
 }
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 61c313e21a91..169843de91cb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3687,9 +3687,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 
 }
 
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
 sizeof(intel_dp->edp_dpcd))
 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d93efb49a2e2..954e9454625e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -495,7 +495,6 @@ struct intel_crtc_scaler_state {
 
 struct intel_pipe_wm {
 struct intel_wm_level wm[5];
- struct intel_wm_level raw_wm[5];
 uint32_t linetime;
 bool fbc_wm_enabled;
 bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 40b224b44d1b..1427cec843b9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2696,9 +2696,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 const struct intel_crtc *intel_crtc,
 int level,
 struct intel_crtc_state *cstate,
- struct intel_plane_state *pristate,
- struct intel_plane_state *sprstate,
- struct intel_plane_state *curstate,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
 struct intel_wm_level *result)
 {
 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -3016,28 +3016,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 struct intel_pipe_wm *pipe_wm;
 struct drm_device *dev = state->dev;
 const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
- struct intel_plane_state *pristate = NULL;
- struct intel_plane_state *sprstate = NULL;
- struct intel_plane_state *curstate = NULL;
+ struct drm_plane *plane;
+ const struct drm_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
 int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
 struct ilk_wm_maximums max;
 
 pipe_wm = &cstate->wm.ilk.optimal;
 
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct intel_plane_state *ps;
-
- ps = intel_atomic_get_existing_plane_state(state,
- intel_plane);
- if (!ps)
- continue;
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
 
- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 pristate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 sprstate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
 curstate = ps;
 }
 
@@ -3059,11 +3055,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 if (pipe_wm->sprites_scaled)
 usable_level = 0;
 
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -3073,8 +3067,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 
 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
 
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
 
 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
 pristate, sprstate, curstate, wm);
@@ -3084,13 +3078,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 * register maximums since such watermarks are
 * always invalid.
 */
- if (level > usable_level)
- continue;
-
- if (ilk_validate_wm_level(level, &max, wm))
- pipe_wm->wm[level] = *wm;
- else
- usable_level = level;
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
 }
 
 return 0;
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index b283fc90be1e..17a4a7b6cdbb 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
 return -ENOMEM;
 
 gicp->dev = &pdev->dev;
+ spin_lock_init(&gicp->spi_lock);
 
 gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 if (!gicp->res)
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e702d48bd023..81ba6e0d88d8 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 int i;
 
- if (unlikely(direntry->d_name.len >
+ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+ direntry->d_name.len >
 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
 return -ENAMETOOLONG;
 
@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
 
 rc = check_name(direntry, tcon);
 if (rc)
- goto out_free_xid;
+ goto out;
 
 server = tcon->ses->server;
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 28d2753be094..a9e3b26e1b72 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -855,9 +855,12 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
 struct page *page)
 {
 struct inode *inode = mapping->host;
+ pgoff_t index = page->index;
 
 remove_huge_page(page);
- hugetlb_fix_reserve_counts(inode);
+ if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
+ hugetlb_fix_reserve_counts(inode);
+
 return 0;
 }
 
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index fb15a96df0b6..386aecce881d 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
 
 static int ocfs2_trim_extent(struct super_block *sb,
 struct ocfs2_group_desc *gd,
- u32 start, u32 count)
+ u64 group, u32 start, u32 count)
 {
 u64 discard, bcount;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
 
 bcount = ocfs2_clusters_to_blocks(sb, count);
- discard = le64_to_cpu(gd->bg_blkno) +
- ocfs2_clusters_to_blocks(sb, start);
+ discard = ocfs2_clusters_to_blocks(sb, start);
+
+ /*
+ * For the first cluster group, the gd->bg_blkno is not at the start
+ * of the group, but at an offset from the start. If we add it while
+ * calculating discard for first group, we will wrongly start fstrim a
+ * few blocks after the desired start block and the range can cross
+ * over into the next cluster group. So, add it only if this is not
+ * the first cluster group.
+ */
+ if (group != osb->first_cluster_group_blkno)
+ discard += le64_to_cpu(gd->bg_blkno);
 
 trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
 
@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
 }
 
 static int ocfs2_trim_group(struct super_block *sb,
- struct ocfs2_group_desc *gd,
+ struct ocfs2_group_desc *gd, u64 group,
 u32 start, u32 max, u32 minbits)
 {
 int ret = 0, count = 0, next;
@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
 next = ocfs2_find_next_bit(bitmap, max, start);
 
 if ((next - start) >= minbits) {
- ret = ocfs2_trim_extent(sb, gd,
+ ret = ocfs2_trim_extent(sb, gd, group,
 start, next - start);
 if (ret < 0) {
 mlog_errno(ret);
@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
 }
 
 gd = (struct ocfs2_group_desc *)gd_bh->b_data;
- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+ cnt = ocfs2_trim_group(sb, gd, group,
+ first_bit, last_bit, minlen);
 brelse(gd_bh);
 gd_bh = NULL;
 if (cnt < 0) {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d83d28e53e62..a615eda102ae 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -246,6 +246,10 @@ struct swap_info_struct {
 * both locks need hold, hold swap_lock
 * first.
 */
+ spinlock_t cont_lock; /*
+ * protect swap count continuation page
+ * list.
+ */
 struct work_struct discard_work; /* discard worker */
 struct swap_cluster_list discard_clusters; /* discard clusters list */
 };
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7242a6e1ec76..95bbe99e4e6c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -901,9 +901,11 @@ list_update_cgroup_event(struct perf_event *event,
 cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
 /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
 if (add) {
+ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+
 list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
- if (perf_cgroup_from_task(current, ctx) == event->cgrp)
- cpuctx->cgrp = event->cgrp;
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+ cpuctx->cgrp = cgrp;
 } else {
 list_del(cpuctx_entry);
 cpuctx->cgrp = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index bf57ab12ffe8..a6639b346373 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -901,11 +901,27 @@ void exit_pi_state_list(struct task_struct *curr)
 */
 raw_spin_lock_irq(&curr->pi_lock);
 while (!list_empty(head)) {
-
 next = head->next;
 pi_state = list_entry(next, struct futex_pi_state, list);
 key = pi_state->key;
 hb = hash_futex(&key);
+
+ /*
+ * We can race against put_pi_state() removing itself from the
+ * list (a waiter going away). put_pi_state() will first
+ * decrement the reference count and then modify the list, so
+ * it's possible to see the list entry but fail this reference
+ * acquire.
+ *
+ * In that case, drop the locks to let put_pi_state() make
+ * progress and retry the loop.
+ */
+ if (!atomic_inc_not_zero(&pi_state->refcount)) {
+ raw_spin_unlock_irq(&curr->pi_lock);
+ cpu_relax();
+ raw_spin_lock_irq(&curr->pi_lock);
+ continue;
+ }
 raw_spin_unlock_irq(&curr->pi_lock);
 
 spin_lock(&hb->lock);
@@ -916,8 +932,10 @@ void exit_pi_state_list(struct task_struct *curr)
 * task still owns the PI-state:
 */
 if (head->next != next) {
+ /* retain curr->pi_lock for the loop invariant */
 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 spin_unlock(&hb->lock);
+ put_pi_state(pi_state);
 continue;
 }
 
@@ -925,9 +943,8 @@ void exit_pi_state_list(struct task_struct *curr)
 WARN_ON(list_empty(&pi_state->list));
 list_del_init(&pi_state->list);
 pi_state->owner = NULL;
- raw_spin_unlock(&curr->pi_lock);
 
- get_pi_state(pi_state);
+ raw_spin_unlock(&curr->pi_lock);
 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 spin_unlock(&hb->lock);
 
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 0bd8a611eb83..fef5d2e114be 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
 if (unlikely(len > datalen - dp))
 goto data_overrun_error;
 }
+ } else {
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
 }
 
 if (flags & FLAG_CONS) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 31e207cb399b..011725849f52 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3977,6 +3977,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 unsigned long src_addr,
 struct page **pagep)
 {
+ struct address_space *mapping;
+ pgoff_t idx;
+ unsigned long size;
 int vm_shared = dst_vma->vm_flags & VM_SHARED;
 struct hstate *h = hstate_vma(dst_vma);
 pte_t _dst_pte;
@@ -4014,13 +4017,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 __SetPageUptodate(page);
 set_page_huge_active(page);
 
+ mapping = dst_vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+
 /*
 * If shared, add to page cache
 */
 if (vm_shared) {
- struct address_space *mapping = dst_vma->vm_file->f_mapping;
- pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ ret = -EFAULT;
+ if (idx >= size)
+ goto out_release_nounlock;
 
+ /*
+ * Serialization between remove_inode_hugepages() and
+ * huge_add_to_page_cache() below happens through the
+ * hugetlb_fault_mutex_table that here must be held by
+ * the caller.
+ */
 ret = huge_add_to_page_cache(page, mapping, idx);
 if (ret)
 goto out_release_nounlock;
@@ -4029,6 +4043,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
 spin_lock(ptl);
 
+ /*
+ * Recheck the i_size after holding PT lock to make sure not
+ * to leave any page mapped (as page_mapped()) beyond the end
+ * of the i_size (remove_inode_hugepages() is strict about
+ * enforcing that). If we bail out here, we'll also leave a
+ * page in the radix tree in the vm_shared case beyond the end
+ * of the i_size, but remove_inode_hugepages() will take care
+ * of it as soon as we drop the hugetlb_fault_mutex_table.
+ */
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ ret = -EFAULT;
+ if (idx >= size)
+ goto out_release_unlock;
+
 ret = -EEXIST;
 if (!huge_pte_none(huge_ptep_get(dst_pte)))
 goto out_release_unlock;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a8952b6563c6..3191465b0ccf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2635,6 +2635,7 @@ static struct swap_info_struct *alloc_swap_info(void)
 p->flags = SWP_USED;
 spin_unlock(&swap_lock);
 spin_lock_init(&p->lock);
+ spin_lock_init(&p->cont_lock);
 
 return p;
 }
@@ -3307,6 +3308,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 head = vmalloc_to_page(si->swap_map + offset);
 offset &= ~PAGE_MASK;
 
+ spin_lock(&si->cont_lock);
 /*
 * Page allocation does not initialize the page's lru field,
 * but it does always reset its private field.
@@ -3326,7 +3328,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 * a continuation page, free our allocation and use this one.
 */
 if (!(count & COUNT_CONTINUED))
- goto out;
+ goto out_unlock_cont;
 
 map = kmap_atomic(list_page) + offset;
 count = *map;
@@ -3337,11 +3339,13 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 * free our allocation and use this one.
 */
 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
- goto out;
+ goto out_unlock_cont;
 }
 
 list_add_tail(&page->lru, &head->lru);
 page = NULL; /* now it's attached, don't free it */
+out_unlock_cont:
+ spin_unlock(&si->cont_lock);
 out:
 unlock_cluster(ci);
 spin_unlock(&si->lock);
@@ -3366,6 +3370,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 struct page *head;
 struct page *page;
 unsigned char *map;
+ bool ret;
 
 head = vmalloc_to_page(si->swap_map + offset);
 if (page_private(head) != SWP_CONTINUED) {
@@ -3373,6 +3378,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 return false; /* need to add count continuation */
 }
 
+ spin_lock(&si->cont_lock);
 offset &= ~PAGE_MASK;
 page = list_entry(head->lru.next, struct page, lru);
 map = kmap_atomic(page) + offset;
@@ -3393,8 +3399,10 @@ static bool swap_count_continued(struct swap_info_struct *si,
 if (*map == SWAP_CONT_MAX) {
 kunmap_atomic(map);
 page = list_entry(page->lru.next, struct page, lru);
- if (page == head)
- return false; /* add count continuation */
+ if (page == head) {
+ ret = false; /* add count continuation */
+ goto out;
+ }
 map = kmap_atomic(page) + offset;
 init_map: *map = 0; /* we didn't zero the page */
 }
@@ -3407,7 +3415,7 @@ init_map: *map = 0; /* we didn't zero the page */
 kunmap_atomic(map);
 page = list_entry(page->lru.prev, struct page, lru);
 }
- return true; /* incremented */
+ ret = true; /* incremented */
 
 } else { /* decrementing */
 /*
@@ -3433,8 +3441,11 @@ init_map: *map = 0; /* we didn't zero the page */
 kunmap_atomic(map);
 page = list_entry(page->lru.prev, struct page, lru);
 }
- return count == COUNT_CONTINUED;
+ ret = count == COUNT_CONTINUED;
 }
+out:
+ spin_unlock(&si->cont_lock);
+ return ret;
 }
 
 /*
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 06173b091a74..c04032302a25 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -459,34 +459,33 @@ static long keyring_read(const struct key *keyring,
 char __user *buffer, size_t buflen)
 {
 struct keyring_read_iterator_context ctx;
- unsigned long nr_keys;
- int ret;
+ long ret;
 
 kenter("{%d},,%zu", key_serial(keyring), buflen);
 
 if (buflen & (sizeof(key_serial_t) - 1))
 return -EINVAL;
 
- nr_keys = keyring->keys.nr_leaves_on_tree;
- if (nr_keys == 0)
- return 0;
-
- /* Calculate how much data we could return */
- if (!buffer || !buflen)
- return nr_keys * sizeof(key_serial_t);
-
- /* Copy the IDs of the subscribed keys into the buffer */
- ctx.buffer = (key_serial_t __user *)buffer;
- ctx.buflen = buflen;
- ctx.count = 0;
- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
- if (ret < 0) {
- kleave(" = %d [iterate]", ret);
- return ret;
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
 }
 
- kleave(" = %zu [ok]", ctx.count);
- return ctx.count;
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
 }
 
 /*
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index bd85315cbfeb..98aa89ff7bfd 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1147,20 +1147,21 @@ static long trusted_read(const struct key *key, char __user *buffer,
 p = dereference_key_locked(key);
 if (!p)
 return -EINVAL;
- if (!buffer || buflen <= 0)
- return 2 * p->blob_len;
- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
 
- bufp = ascii_buf;
- for (i = 0; i < p->blob_len; i++)
- bufp = hex_byte_pack(bufp, p->blob[i]);
- if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
+ if (buffer && buflen >= 2 * p->blob_len) {
+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ if (!ascii_buf)
+ return -ENOMEM;
+
+ bufp = ascii_buf;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
+ kzfree(ascii_buf);
+ return -EFAULT;
+ }
 kzfree(ascii_buf);
- return -EFAULT;
 }
- kzfree(ascii_buf);
 return 2 * p->blob_len;
 }
 
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6c9cba2166d9..d10c780dfd54 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
 if (atomic)
 read_lock(&grp->list_lock);
 else
- down_read(&grp->list_mutex);
+ down_read_nested(&grp->list_mutex, hop);
 list_for_each_entry(subs, &grp->list_head, src_list) {
 /* both ports ready? */
 if (atomic_read(&subs->ref_count) != 2)
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 6a437eb66115..59127b6ef39e 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -133,7 +133,8 @@ enum {
 #endif /* CONFIG_X86_X32 */
 };
 
-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
 {
 void __user *argp = compat_ptr(arg);
 
@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
 case SNDRV_TIMER_IOCTL_PAUSE:
 case SNDRV_TIMER_IOCTL_PAUSE_OLD:
 case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
 case SNDRV_TIMER_IOCTL_GPARAMS32:
 return snd_timer_user_gparams_compat(file, argp);
 case SNDRV_TIMER_IOCTL_INFO32:
@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
 }
 return -ENOIOCTLCMD;
 }
+
+static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct snd_timer_user *tu = file->private_data;
+ long ret;
+
+ mutex_lock(&tu->ioctl_lock);
+ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
+ mutex_unlock(&tu->ioctl_lock);
+ return ret;
+}
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 2c1bd2763864..6758f789b712 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
 return 0;
 }
 
+static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+
+ /*
+ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
+ * avoid losing SNR (workaround from ADI). This must be done after
+ * the ADC(s) have been enabled. According to the data sheet, it is
+ * normally illegal to set this bit when the sampling rate is 96 kHz,
+ * but according to ADI it is acceptable for this workaround.
+ */
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, 0);
+
+ return 0;
+}
+
 static const char * const adau17x1_mono_stereo_text[] = {
 "Stereo",
 "Mono Left Channel (L+R)",
@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
 SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
 &adau17x1_dac_mode_mux),
 
- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
+ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
+ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
 SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
 SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index bf04b7efee40..db350035fad7 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
 
 #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
 
+#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
+
 
 #endif
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index aa6b68db80b4..b606f1643fe5 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -1803,37 +1803,33 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
 int start_id, entry_fn_t fn, void *opaque)
 {
- void *entry = kzalloc(esz, GFP_KERNEL);
 struct kvm *kvm = its->dev->kvm;
 unsigned long len = size;
 int id = start_id;
 gpa_t gpa = base;
+ char entry[esz];
 int ret;
 
+ memset(entry, 0, esz);
+
 while (len > 0) {
 int next_offset;
 size_t byte_offset;
 
 ret = kvm_read_guest(kvm, gpa, entry, esz);
 if (ret)
- goto out;
+ return ret;
 
 next_offset = fn(its, id, entry, opaque);
- if (next_offset <= 0) {
- ret = next_offset;
- goto out;
- }
+ if (next_offset <= 0)
+ return next_offset;
 
 byte_offset = next_offset * esz;
 id += next_offset;
 gpa += byte_offset;
 len -= byte_offset;
 }
- ret = 1;
-
-out:
- kfree(entry);
- return ret;
+ return 1;
 }
 
 /**