Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0186-4.9.87-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 77640 bytes
-added kernel-alx-legacy pkg
diff --git a/Makefile b/Makefile
index e918d25e95bb..3043937a65d1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 86
+SUBLEVEL = 87
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 4f2c5ec75714..e262fa9ef334 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -97,6 +97,8 @@
};

&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;

twl: twl@48 {
@@ -215,7 +217,12 @@
>;
};

-
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};

&omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index efe53998c961..08f0a35dc0d1 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -100,6 +100,8 @@
};

&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <2600000>;

twl: twl@48 {
@@ -207,6 +209,12 @@
OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
};

&uart2 {
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 92eab1d51785..61049216e4d5 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -6,6 +6,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

KVM=../../../../virt/kvm

+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -14,7 +16,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
index 111bda8cdebd..be4b8b0a40ad 100644
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@

#include <asm/kvm_hyp.h>

+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
__asm__(".arch_extension virt");

void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 541647f57192..895c0746fe50 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -42,7 +42,7 @@ config MACH_ARMADA_375
depends on ARCH_MULTI_V7
select ARMADA_370_XP_IRQ
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_375_CLK
select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
bool "Marvell Armada 380/385 boards"
depends on ARCH_MULTI_V7
select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_GIC
select ARMADA_370_XP_IRQ
select ARMADA_38X_CLK
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d8199e12fb6e..b47a26f4290c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -234,8 +234,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
off = offsetof(struct bpf_array, map.max_entries);
emit_a64_mov_i64(tmp, off, ctx);
emit(A64_LDR32(tmp, r2, tmp), ctx);
+ emit(A64_MOV(0, r3, r3), ctx);
emit(A64_CMP(0, r3, tmp), ctx);
- emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
@@ -243,7 +244,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
emit(A64_CMP(1, tcc, tmp), ctx);
- emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+ emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

/* prog = array->ptrs[index];
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 1d8c24dc04d4..88290d32b956 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -25,6 +25,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index df757c9675e6..025afe5f17a7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -464,10 +464,10 @@ EXPORT_SYMBOL(copy_user_page);
int __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
- unsigned long flags, size;
+ unsigned long flags;

- size = (end - start);
- if (size >= parisc_tlb_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_tlb_flush_threshold) {
flush_tlb_all();
return 1;
}
@@ -538,13 +538,11 @@ void flush_cache_mm(struct mm_struct *mm)
struct vm_area_struct *vma;
pgd_t *pgd;

- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_all();
-
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_tlb_all();
flush_cache_all();
return;
}
@@ -552,9 +550,9 @@ void flush_cache_mm(struct mm_struct *mm)
if (mm->context == mfsp(3)) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if ((vma->vm_flags & VM_EXEC) == 0)
- continue;
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
return;
}
@@ -598,14 +596,9 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- BUG_ON(!vma->vm_mm->context);
-
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
flush_tlb_range(vma, start, end);
-
- if ((end - start) >= parisc_cache_flush_threshold
- || vma->vm_mm->context != mfsp(3)) {
flush_cache_all();
return;
}
@@ -613,6 +606,7 @@ void flush_cache_range(struct vm_area_struct *vma,
flush_user_dcache_range_asm(start, end);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
}

void
@@ -621,8 +615,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
BUG_ON(!vma->vm_mm->context);

if (pfn_valid(pfn)) {
- if (parisc_requires_coherency())
- flush_tlb_page(vma, vmaddr);
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
@@ -630,21 +623,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 2d40c4ff3f69..67b0f7532e83 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

+ENTRY_CFI(purge_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ pdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0fe98a567125..be9d968244ad 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -245,6 +245,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* goto out;
*/
PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+ PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
PPC_BCC(COND_GE, out);

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 8b272a08d1a8..e2e09347ee3c 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,18 @@

#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>

/*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
*/
typedef struct {
+ /*
+ * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+ * be reused, and zero is not a valid ctx_id.
+ */
+ u64 ctx_id;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
#endif
@@ -33,6 +39,11 @@ typedef struct {
#endif
} mm_context_t;

+#define INIT_MM_CONTEXT(mm) \
+ .context = { \
+ .ctx_id = 1, \
+ }
+
void leave_mm(int cpu);

#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index d23e35584f15..5a295bb97103 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,6 +12,9 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
@@ -106,6 +109,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
/* pkey 0 is the default and always allocated */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 76b058533e47..81a1be326571 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
}

#endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ * callq do_rop
+ * spec_trap:
+ * pause
+ * lfence
+ * jmp spec_trap
+ * do_rop:
+ * mov %rax,(%rsp)
+ * retq
+ *
+ * Without retpolines configured:
+ *
+ * jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE 17
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT1_off32(0xE8, 7); /* callq do_rop */ \
+ /* spec_trap: */ \
+ EMIT2(0xF3, 0x90); /* pause */ \
+ EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
+ EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
+ /* do_rop: */ \
+ EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+ EMIT1(0xC3); /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE 2
+# define RETPOLINE_RAX_BPF_JIT() \
+ EMIT2(0xFF, 0xE0); /* jmp *%rax */
+#endif
+
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 94146f665a3c..99185a064978 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
struct tlb_state {
struct mm_struct *active_mm;
int state;
+ /* last user mm's ctx id */
+ u64 last_ctx_id;

/*
* Access to this CR4 shadow and to H/W CR4 is protected by
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index b5229abd1629..4922ab66fd29 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -93,8 +93,12 @@ static struct apic_chip_data *alloc_apic_chip_data(int node)
return NULL;
}

-static void free_apic_chip_data(struct apic_chip_data *data)
+static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data)
{
+#ifdef CONFIG_X86_IO_APIC
+ if (virq < nr_legacy_irqs())
+ legacy_irq_data[virq] = NULL;
+#endif
if (data) {
free_cpumask_var(data->domain);
free_cpumask_var(data->old_domain);
@@ -318,11 +322,7 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
apic_data = irq_data->chip_data;
irq_domain_reset_irq_data(irq_data);
raw_spin_unlock_irqrestore(&vector_lock, flags);
- free_apic_chip_data(apic_data);
-#ifdef CONFIG_X86_IO_APIC
- if (virq + i < nr_legacy_irqs())
- legacy_irq_data[virq + i] = NULL;
-#endif
+ free_apic_chip_data(virq + i, apic_data);
}
}
}
@@ -363,7 +363,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
err = assign_irq_vector_policy(virq + i, node, data, info);
if (err) {
irq_data->chip_data = NULL;
- free_apic_chip_data(data);
+ free_apic_chip_data(virq + i, data);
goto error;
}
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index be644afab1bb..24d2a3ee743f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -44,6 +44,7 @@
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
@@ -4919,7 +4920,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

asm volatile (
"push %%" _ASM_BP "; \n\t"
@@ -5028,11 +5029,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c51aaac953b4..0f3bb4632310 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -49,6 +49,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include "trace.h"
@@ -8906,7 +8907,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* being speculatively taken.
*/
if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

vmx->__launched = vmx->loaded_vmcs->launched;
asm(
@@ -9041,11 +9042,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 578973ade71b..eac92e2d171b 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -10,6 +10,7 @@

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
@@ -29,6 +30,8 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/

+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -104,6 +107,28 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();

if (likely(prev != next)) {
+ u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+ /*
+ * Avoid user/user BTB poisoning by flushing the branch
+ * predictor when switching between processes. This stops
+ * one process from doing Spectre-v2 attacks on another.
+ *
+ * As an optimization, flush indirect branches only when
+ * switching into processes that disable dumping. This
+ * protects high value processes like gpg, without having
+ * too high performance overhead. IBPB is *expensive*!
+ *
+ * This will not flush branches when switching into kernel
+ * threads. It will also not flush if we switch to idle
+ * thread and back to the same process. It will flush if we
+ * switch to a different non-dumpable process.
+ */
+ if (tsk && tsk->mm &&
+ tsk->mm->context.ctx_id != last_ctx_id &&
+ get_dumpable(tsk->mm) != SUID_DUMP_USER)
+ indirect_branch_prediction_barrier();
+
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
* If our current stack is in vmalloc space and isn't
@@ -118,6 +143,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
}

+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7840331d3056..1f7ed2ed6ff7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
+#include <asm/nospec-branch.h>
#include <linux/bpf.h>

int bpf_jit_enable __read_mostly;
@@ -281,7 +282,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;

@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
@@ -304,7 +305,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* goto out;
*/
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
EMIT2(X86_JE, OFFSET3); /* je out */
label3 = cnt;

@@ -317,7 +318,7 @@ static void emit_bpf_tail_call(u8 **pprog)
* rdi == ctx (1st arg)
* rax == prog->bpf_func + prologue_size
*/
- EMIT2(0xFF, 0xE0); /* jmp rax */
+ RETPOLINE_RAX_BPF_JIT();

/* out: */
BUILD_BUG_ON(cnt - label1 != OFFSET1);
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 7850128f0026..834783bc6752 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)

static void intel_mid_reboot(void)
{
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
}

static unsigned long __init intel_mid_calibrate_tsc(void)
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 7f664c416faf..4ecd0de08557 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,11 +1,14 @@
#include <linux/types.h>
#include <linux/tick.h>
+#include <linux/percpu-defs.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>

+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
xen_mm_unpin_all();
}

+static DEFINE_PER_CPU(u64, spec_ctrl);
+
void xen_arch_pre_suspend(void)
{
if (xen_pv_domain())
@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)

static void xen_vcpu_notify_restore(void *data)
{
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)

static void xen_vcpu_notify_suspend(void *data)
{
+ u64 tmp;
+
tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
}

void xen_arch_resume(void)
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 6f060c76217b..7205e6da16cd 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -458,7 +458,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
int size = 0;
- int expected;
+ u32 expected;

if (!chip)
return -EBUSY;
@@ -475,7 +475,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
}

expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..65b824954bdc 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -136,6 +136,12 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}

+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 62ee44e57ddc..da69ddea56cf 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -437,7 +437,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -452,7 +453,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *)(buf + 2));
- if ((size_t) expected > count) {
+ if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index c6428771841f..caa86b19c76d 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
- int expected, status, burst_count, retries, size = 0;
+ int status;
+ int burst_count;
+ int retries;
+ int size = 0;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 8022bea27fed..06173d2e316f 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -98,7 +98,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}

static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);

diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 4d24ec3d7cd6..f9aa47ec7af7 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -208,7 +208,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0;
- int expected, status;
+ int status;
+ u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -223,7 +224,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
+ if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
@@ -256,7 +257,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -345,7 +346,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 9191aabbf9c2..e1c2193f2ed3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}

static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 3b97b14c3417..01eccb193b5a 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -47,9 +47,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};

static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -58,7 +56,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}

static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -72,14 +70,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;

memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;

@@ -89,9 +87,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;

- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;

for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -100,7 +98,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}

@@ -114,12 +112,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;

- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}

spi_message_init(&m);
@@ -128,8 +126,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;

+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}

exit:
@@ -140,13 +142,13 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}

static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}

static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
@@ -195,6 +197,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)

phy->spi_device = dev;

+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 7b596fa38ad2..6bebc1f9f55a 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
- return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ return cpufreq_table_validate_and_show(policy, ftab);
+
+ return 0;
}

static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12e35fe..ee6045d6c0bb 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -302,6 +302,7 @@ static void do_region(int op, int op_flags, unsigned region,
special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
special_cmd_max_sectors == 0) {
+ atomic_inc(&io->count);
dec_count(io, region, -EOPNOTSUPP);
return;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8ebf1b97e1d2..27d8bb21e04f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8224,6 +8224,10 @@ static int remove_and_add_spares(struct mddev *mddev,
int removed = 0;
bool remove_some = false;

+ if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ /* Mustn't remove devices when resync thread is running */
+ return 0;
+
rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0fe5bc9dbce..31f16105184c 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
* New users must use I2C client binding directly!
*/
struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
- struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+ struct i2c_adapter *i2c,
+ struct i2c_adapter **tuner_i2c_adapter)
{
struct i2c_client *client;
struct i2c_board_info board_info;
- struct m88ds3103_platform_data pdata;
+ struct m88ds3103_platform_data pdata = {};

pdata.clk = cfg->clock;
pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
case M88DS3103_CHIP_ID:
break;
default:
+ ret = -ENODEV;
+ dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
goto err_kfree;
}

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1e2c8eca3af1..bea9ae31a769 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -809,6 +809,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -818,9 +819,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;

+out:
+ kfree(sfd_pl);
return err;
}

@@ -845,6 +853,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -855,9 +864,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;

+out:
+ kfree(sfd_pl);
return err;
}

@@ -891,6 +907,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid, bool adding)
{
char *sfd_pl;
+ u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -900,7 +917,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
kfree(sfd_pl);
return err;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6e12401b5102..e2d9ca60e467 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -925,7 +925,7 @@ void phy_start(struct phy_device *phydev)
break;
case PHY_HALTED:
/* make sure interrupts are re-enabled for the PHY */
- if (phydev->irq != PHY_POLL) {
+ if (phy_interrupt_is_valid(phydev)) {
err = phy_enable_interrupts(phydev);
if (err < 0)
break;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fc4c2ccc3d22..114457921890 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3157,6 +3157,15 @@ ppp_connect_channel(struct channel *pch, int unit)
goto outl;

ppp_lock(ppp);
+ spin_lock_bh(&pch->downl);
+ if (!pch->chan) {
+ /* Don't connect unregistered channels */
+ spin_unlock_bh(&pch->downl);
+ ppp_unlock(ppp);
+ ret = -ENOTCONN;
+ goto outl;
+ }
+ spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 47fdb87d3567..8a9aced850be 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
- } else
+ } else if (netif_carrier_ok(proto->dev))
+ ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 0, NULL);
+ else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0916b126923..6643a7bc381c 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -526,10 +526,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)

/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
- * hierarchies.
+ * hierarchies. Note that some PCIe host implementations omit
+ * the root ports entirely, in which case a downstream port on
+ * a switch may become the root of the link state chain for all
+ * its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+ !pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 9b5fc502f6a1..403712bf1ddf 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -592,6 +592,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};

+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
/**
* definition of a qeth channel, used for read and write
*/
@@ -849,7 +854,7 @@ struct qeth_trap_id {
*/
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
- return PFN_UP(end - 1) - PFN_DOWN(start);
+ return PFN_UP(end) - PFN_DOWN(start);
}

static inline int qeth_get_micros(void)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index df8f74cb1406..cc28dda322b5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2050,7 +2050,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = NULL;

QETH_CARD_TEXT(card, 2, "sendctl");

@@ -2064,23 +2064,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply->callback = reply_cb;
reply->param = reply_param;
- if (card->state == CARD_STATE_DOWN)
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- else
- reply->seqno = card->seqno.ipa++;
+
init_waitqueue_head(&reply->wait_q);
- spin_lock_irqsave(&card->lock, flags);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
- qeth_prepare_control_data(card, len, iob);

- if (IS_IPA(iob->data))
+ if (IS_IPA(iob->data)) {
+ cmd = __ipa_cmd(iob);
+ cmd->hdr.seqno = card->seqno.ipa++;
+ reply->seqno = cmd->hdr.seqno;
event_timeout = QETH_IPA_TIMEOUT;
- else
+ } else {
+ reply->seqno = QETH_IDX_COMMAND_SEQNO;
event_timeout = QETH_TIMEOUT;
+ }
+ qeth_prepare_control_data(card, len, iob);
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list, &card->cmd_waiter_list);
+ spin_unlock_irqrestore(&card->lock, flags);
+
timeout = jiffies + event_timeout;

QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2105,9 +2109,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,

/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- if ((cmd->hdr.command == IPA_CMD_SETIP) &&
- (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+ if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+ cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
@@ -2871,7 +2874,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
- cmd->hdr.seqno = card->seqno.ipa;
+ /* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2)
@@ -3852,10 +3855,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
- int elements = qeth_get_elements_for_range(
- (addr_t)skb->data + data_offset,
- (addr_t)skb->data + skb_headlen(skb)) +
- qeth_get_elements_for_frags(skb);
+ addr_t end = (addr_t)skb->data + skb_headlen(skb);
+ int elements = qeth_get_elements_for_frags(skb);
+ addr_t start = (addr_t)skb->data + data_offset;
+
+ if (start != end)
+ elements += qeth_get_elements_for_range(start, end);

if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index eedf9b01a496..573569474e44 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -39,8 +39,40 @@ struct qeth_ipaddr {
unsigned int pfxlen;
} a6;
} u;
-
};
+
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+ struct qeth_ipaddr *a2)
+{
+ if (a1->proto != a2->proto)
+ return false;
+ if (a1->proto == QETH_PROT_IPV6)
+ return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+ return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+ struct qeth_ipaddr *a2)
+{
+ /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+ * so 'proto' and 'addr' match for sure.
+ *
+ * For ucast:
+ * - 'mac' is always 0.
+ * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+ * values are required to avoid mixups in takeover eligibility.
+ *
+ * For mcast,
+ * - 'mac' is mapped from the IP, and thus always matches.
+ * - 'mask'/'pfxlen' is always 0.
+ */
+ if (a1->type != a2->type)
+ return false;
+ if (a1->proto == QETH_PROT_IPV6)
+ return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+ return a1->u.a4.mask == a2->u.a4.mask;
+}
+
static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
u64 ret = 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 1487f8a0c575..a668e6b71a29 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -154,6 +154,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
return -EINVAL;
}

+static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
+ struct qeth_ipaddr *query)
+{
+ u64 key = qeth_l3_ipaddr_hash(query);
+ struct qeth_ipaddr *addr;
+
+ if (query->is_multicast) {
+ hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+ if (qeth_l3_addr_match_ip(addr, query))
+ return addr;
+ } else {
+ hash_for_each_possible(card->ip_htable, addr, hnode, key)
+ if (qeth_l3_addr_match_ip(addr, query))
+ return addr;
+ }
+ return NULL;
+}
+
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
@@ -207,34 +225,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc;
}

-inline int
-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
-{
- return addr1->proto == addr2->proto &&
- !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
- !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
-}
-
-static struct qeth_ipaddr *
-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
-{
- struct qeth_ipaddr *addr;
-
- if (tmp_addr->is_multicast) {
- hash_for_each_possible(card->ip_mc_htable, addr,
- hnode, qeth_l3_ipaddr_hash(tmp_addr))
- if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
- return addr;
- } else {
- hash_for_each_possible(card->ip_htable, addr,
- hnode, qeth_l3_ipaddr_hash(tmp_addr))
- if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
- return addr;
- }
-
- return NULL;
-}
-
int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
@@ -249,8 +239,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}

- addr = qeth_l3_ip_from_hash(card, tmp_addr);
- if (!addr)
+ addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+ if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
return -ENOENT;

addr->ref_counter--;
@@ -259,12 +249,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
if (addr->in_progress)
return -EINPROGRESS;

- if (!qeth_card_hw_is_reachable(card)) {
- addr->disp_flag = QETH_DISP_ADDR_DELETE;
- return 0;
- }
-
- rc = qeth_l3_deregister_addr_entry(card, addr);
+ if (qeth_card_hw_is_reachable(card))
+ rc = qeth_l3_deregister_addr_entry(card, addr);

hash_del(&addr->hnode);
kfree(addr);
@@ -276,6 +262,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
+ char buf[40];

QETH_CARD_TEXT(card, 4, "addip");

@@ -286,8 +273,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
1515     }
1516    
1517     - addr = qeth_l3_ip_from_hash(card, tmp_addr);
1518     - if (!addr) {
1519     + addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
1520     + if (addr) {
1521     + if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
1522     + return -EADDRINUSE;
1523     + if (qeth_l3_addr_match_all(addr, tmp_addr)) {
1524     + addr->ref_counter++;
1525     + return 0;
1526     + }
1527     + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
1528     + buf);
1529     + dev_warn(&card->gdev->dev,
1530     + "Registering IP address %s failed\n", buf);
1531     + return -EADDRINUSE;
1532     + } else {
1533     addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
1534     if (!addr)
1535     return -ENOMEM;
1536     @@ -327,18 +326,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
1537     (rc == IPA_RC_LAN_OFFLINE)) {
1538     addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1539     if (addr->ref_counter < 1) {
1540     - qeth_l3_delete_ip(card, addr);
1541     + qeth_l3_deregister_addr_entry(card, addr);
1542     + hash_del(&addr->hnode);
1543     kfree(addr);
1544     }
1545     } else {
1546     hash_del(&addr->hnode);
1547     kfree(addr);
1548     }
1549     - } else {
1550     - if (addr->type == QETH_IP_TYPE_NORMAL)
1551     - addr->ref_counter++;
1552     }
1553     -
1554     return rc;
1555     }
1556    
1557     @@ -406,11 +402,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
1558     spin_lock_bh(&card->ip_lock);
1559    
1560     hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
1561     - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
1562     - qeth_l3_deregister_addr_entry(card, addr);
1563     - hash_del(&addr->hnode);
1564     - kfree(addr);
1565     - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
1566     + if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
1567     if (addr->proto == QETH_PROT_IPV4) {
1568     addr->in_progress = 1;
1569     spin_unlock_bh(&card->ip_lock);
1570     @@ -726,12 +718,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
1571     return -ENOMEM;
1572    
1573     spin_lock_bh(&card->ip_lock);
1574     -
1575     - if (qeth_l3_ip_from_hash(card, ipaddr))
1576     - rc = -EEXIST;
1577     - else
1578     - qeth_l3_add_ip(card, ipaddr);
1579     -
1580     + rc = qeth_l3_add_ip(card, ipaddr);
1581     spin_unlock_bh(&card->ip_lock);
1582    
1583     kfree(ipaddr);
1584     @@ -794,12 +781,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
1585     return -ENOMEM;
1586    
1587     spin_lock_bh(&card->ip_lock);
1588     -
1589     - if (qeth_l3_ip_from_hash(card, ipaddr))
1590     - rc = -EEXIST;
1591     - else
1592     - qeth_l3_add_ip(card, ipaddr);
1593     -
1594     + rc = qeth_l3_add_ip(card, ipaddr);
1595     spin_unlock_bh(&card->ip_lock);
1596    
1597     kfree(ipaddr);
1598     @@ -1444,8 +1426,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
1599     memcpy(tmp->mac, buf, sizeof(tmp->mac));
1600     tmp->is_multicast = 1;
1601    
1602     - ipm = qeth_l3_ip_from_hash(card, tmp);
1603     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
1604     if (ipm) {
1605     + /* for mcast, by-IP match means full match */
1606     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1607     } else {
1608     ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1609     @@ -1528,8 +1511,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
1610     sizeof(struct in6_addr));
1611     tmp->is_multicast = 1;
1612    
1613     - ipm = qeth_l3_ip_from_hash(card, tmp);
1614     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
1615     if (ipm) {
1616     + /* for mcast, by-IP match means full match */
1617     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
1618     continue;
1619     }
1620     @@ -2784,11 +2768,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
1621     static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
1622     struct sk_buff *skb, int extra_elems)
1623     {
1624     - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
1625     - int elements = qeth_get_elements_for_range(
1626     - tcpdptr,
1627     - (addr_t)skb->data + skb_headlen(skb)) +
1628     - qeth_get_elements_for_frags(skb);
1629     + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
1630     + addr_t end = (addr_t)skb->data + skb_headlen(skb);
1631     + int elements = qeth_get_elements_for_frags(skb);
1632     +
1633     + if (start != end)
1634     + elements += qeth_get_elements_for_range(start, end);
1635    
1636     if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
1637     QETH_DBF_MESSAGE(2,
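
Taken together, the qeth_l3_add_ip() rework above implements a three-way decision: a by-IP hit of non-NORMAL type is a hard conflict, a full match only bumps the reference count, and a same-IP entry with different attributes is refused with a warning. The ladder in isolation (hypothetical names and return convention):

#include <errno.h>
#include <stdio.h>

#define TYPE_NORMAL 0

struct entry { int type, mask, refcnt; };

/* returns 0 (shared), -EADDRINUSE (conflict), or 1 (insert new) */
static int add_decision(struct entry *found, const struct entry *tmp)
{
	if (!found)
		return 1;			/* nothing registered yet */
	if (tmp->type != TYPE_NORMAL)
		return -EADDRINUSE;		/* VIPA/RXIP may not shadow */
	if (found->type == tmp->type && found->mask == tmp->mask) {
		found->refcnt++;		/* exact duplicate: share it */
		return 0;
	}
	return -EADDRINUSE;			/* same IP, conflicting attrs */
}

int main(void)
{
	struct entry have = { TYPE_NORMAL, 24, 1 };
	struct entry dup  = { TYPE_NORMAL, 24, 0 };
	struct entry bad  = { TYPE_NORMAL, 16, 0 };

	printf("%d %d %d\n", add_decision(&have, &dup),
	       add_decision(&have, &bad), add_decision(NULL, &dup));
	return 0;
}

This is also why qeth_l3_add_vipa() and qeth_l3_add_rxip() can drop their own duplicate checks above and simply propagate the return value of qeth_l3_add_ip().
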
1638     diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
1639     index 8d8370ddb6b2..1ba49ebe67da 100644
1640     --- a/fs/btrfs/acl.c
1641     +++ b/fs/btrfs/acl.c
1642     @@ -114,13 +114,17 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
1643     int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1644     {
1645     int ret;
1646     + umode_t old_mode = inode->i_mode;
1647    
1648     if (type == ACL_TYPE_ACCESS && acl) {
1649     ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1650     if (ret)
1651     return ret;
1652     }
1653     - return __btrfs_set_acl(NULL, inode, acl, type);
1654     + ret = __btrfs_set_acl(NULL, inode, acl, type);
1655     + if (ret)
1656     + inode->i_mode = old_mode;
1657     + return ret;
1658     }
1659    
1660     /*
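
The btrfs_set_acl() change is a plain save-and-rollback pattern: posix_acl_update_mode() may rewrite inode->i_mode before the ACL is persisted, so the old mode is captured up front and restored if __btrfs_set_acl() fails. The pattern in a generic, self-contained form (stub functions, not the btrfs API):

#include <errno.h>
#include <stdio.h>

struct inode { unsigned int i_mode; };

static int update_mode(struct inode *inode)	/* stub: mutates early */
{
	inode->i_mode &= ~0070u;
	return 0;
}

static int persist_acl(struct inode *inode)	/* stub: fails late */
{
	(void)inode;
	return -EIO;
}

static int set_acl(struct inode *inode)
{
	unsigned int old_mode = inode->i_mode;
	int ret = update_mode(inode);

	if (ret)
		return ret;
	ret = persist_acl(inode);
	if (ret)
		inode->i_mode = old_mode;	/* undo the early mutation */
	return ret;
}

int main(void)
{
	struct inode inode = { .i_mode = 0644 };

	printf("ret=%d mode=%o\n", set_acl(&inode), inode.i_mode);
	return 0;
}
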
1661     diff --git a/include/linux/fs.h b/include/linux/fs.h
1662     index 745ea1b2e02c..18552189560b 100644
1663     --- a/include/linux/fs.h
1664     +++ b/include/linux/fs.h
1665     @@ -3048,7 +3048,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
1666     if (!vma_is_dax(vma))
1667     return false;
1668     inode = file_inode(vma->vm_file);
1669     - if (inode->i_mode == S_IFCHR)
1670     + if (S_ISCHR(inode->i_mode))
1671     return false; /* device-dax */
1672     return true;
1673     }
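
The vma_is_fsdax() fix works because i_mode carries permission bits alongside the file-type bits, so a direct equality test against S_IFCHR essentially never matches a real char device; S_ISCHR() masks with S_IFMT first. This is observable in plain C:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t mode = S_IFCHR | 0644;	/* char device, rw-r--r-- */

	printf("mode == S_IFCHR: %d\n", mode == S_IFCHR);	/* 0 */
	printf("S_ISCHR(mode):   %d\n", S_ISCHR(mode) != 0);	/* 1 */
	return 0;
}
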
1674     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
1675     index fbc98e2c8228..132e3f5a2e0d 100644
1676     --- a/include/linux/nospec.h
1677     +++ b/include/linux/nospec.h
1678     @@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
1679     BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
1680     BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
1681     \
1682     - _i &= _mask; \
1683     - _i; \
1684     + (typeof(_i)) (_i & _mask); \
1685     })
1686     #endif /* _LINUX_NOSPEC_H */
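
The array_index_nospec() change replaces the statement expression's result so the macro keeps the caller's index type: the bare `_i & _mask` is promoted through the unsigned long mask, which can silently change width and signedness at the call site. A small demonstration of the promotion (illustrative only, not the kernel macro):

#include <stdio.h>

int main(void)
{
	unsigned char i = 200;
	unsigned long mask = ~0UL;	/* "index is in bounds" */

	/* the bare AND has type unsigned long, not the index's type */
	printf("sizeof(i & mask)                  = %zu\n",
	       sizeof(i & mask));
	/* the fix restores the original type via the cast */
	printf("sizeof((unsigned char)(i & mask)) = %zu\n",
	       sizeof((unsigned char)(i & mask)));
	return 0;
}
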
1687     diff --git a/include/net/udplite.h b/include/net/udplite.h
1688     index 80761938b9a7..8228155b305e 100644
1689     --- a/include/net/udplite.h
1690     +++ b/include/net/udplite.h
1691     @@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
1692     UDP_SKB_CB(skb)->cscov = cscov;
1693     if (skb->ip_summed == CHECKSUM_COMPLETE)
1694     skb->ip_summed = CHECKSUM_NONE;
1695     + skb->csum_valid = 0;
1696     }
1697    
1698     return 0;
1699     diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
1700     index 9a1e6ed7babc..a38119e4a427 100644
1701     --- a/kernel/bpf/arraymap.c
1702     +++ b/kernel/bpf/arraymap.c
1703     @@ -20,8 +20,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
1704     {
1705     int i;
1706    
1707     - for (i = 0; i < array->map.max_entries; i++)
1708     + for (i = 0; i < array->map.max_entries; i++) {
1709     free_percpu(array->pptrs[i]);
1710     + cond_resched();
1711     + }
1712     }
1713    
1714     static int bpf_array_alloc_percpu(struct bpf_array *array)
1715     @@ -37,6 +39,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
1716     return -ENOMEM;
1717     }
1718     array->pptrs[i] = ptr;
1719     + cond_resched();
1720     }
1721    
1722     return 0;
1723     @@ -48,8 +51,9 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
1724     bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
1725     u32 elem_size, index_mask, max_entries;
1726     bool unpriv = !capable(CAP_SYS_ADMIN);
1727     + u64 cost, array_size, mask64;
1728     struct bpf_array *array;
1729     - u64 array_size, mask64;
1730     + int ret;
1731    
1732     /* check sanity of attributes */
1733     if (attr->max_entries == 0 || attr->key_size != 4 ||
1734     @@ -92,8 +96,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
1735     array_size += (u64) max_entries * elem_size;
1736    
1737     /* make sure there is no u32 overflow later in round_up() */
1738     - if (array_size >= U32_MAX - PAGE_SIZE)
1739     + cost = array_size;
1740     + if (cost >= U32_MAX - PAGE_SIZE)
1741     return ERR_PTR(-ENOMEM);
1742     + if (percpu) {
1743     + cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
1744     + if (cost >= U32_MAX - PAGE_SIZE)
1745     + return ERR_PTR(-ENOMEM);
1746     + }
1747     + cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1748     +
1749     + ret = bpf_map_precharge_memlock(cost);
1750     + if (ret < 0)
1751     + return ERR_PTR(ret);
1752    
1753     /* allocate all map elements and zero-initialize them */
1754     array = bpf_map_area_alloc(array_size);
1755     @@ -107,20 +122,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
1756     array->map.key_size = attr->key_size;
1757     array->map.value_size = attr->value_size;
1758     array->map.max_entries = attr->max_entries;
1759     + array->map.map_flags = attr->map_flags;
1760     + array->map.pages = cost;
1761     array->elem_size = elem_size;
1762    
1763     - if (!percpu)
1764     - goto out;
1765     -
1766     - array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
1767     -
1768     - if (array_size >= U32_MAX - PAGE_SIZE ||
1769     - elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
1770     + if (percpu &&
1771     + (elem_size > PCPU_MIN_UNIT_SIZE ||
1772     + bpf_array_alloc_percpu(array))) {
1773     bpf_map_area_free(array);
1774     return ERR_PTR(-ENOMEM);
1775     }
1776     -out:
1777     - array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
1778    
1779     return &array->map;
1780     }
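
The arraymap rework above computes the complete charge, including the per-CPU value area, before anything is allocated, re-checks the u32 overflow bound, and precharges the memlock budget. The accounting, modelled in userspace (a token header size stands in for sizeof(struct bpf_array); the precharge is stubbed):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_SHIFT	12
#define U32_MAX		0xffffffffULL

static int precharge_memlock(uint64_t pages)	/* stub for the real charge */
{
	return pages > 1024 ? -EPERM : 0;
}

static int map_cost(uint64_t max_entries, uint64_t elem_size, int percpu,
		    unsigned int ncpus, uint64_t *pages)
{
	uint64_t cost = 64 + max_entries * elem_size;	/* 64: token header */

	if (cost >= U32_MAX - PAGE_SIZE)
		return -ENOMEM;
	if (percpu) {
		cost += max_entries * elem_size * ncpus;
		if (cost >= U32_MAX - PAGE_SIZE)
			return -ENOMEM;
	}
	*pages = (cost + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up to pages */
	return precharge_memlock(*pages);
}

int main(void)
{
	uint64_t pages = 0;
	int ret = map_cost(1024, 64, 1, 8, &pages);

	printf("ret=%d pages=%llu\n", ret, (unsigned long long)pages);
	return 0;
}

Charging before allocation means an over-limit request fails up front instead of after a potentially huge allocation.
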
1781     diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
1782     index be8519148c25..a2a232dec236 100644
1783     --- a/kernel/bpf/stackmap.c
1784     +++ b/kernel/bpf/stackmap.c
1785     @@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
1786     smap->map.key_size = attr->key_size;
1787     smap->map.value_size = value_size;
1788     smap->map.max_entries = attr->max_entries;
1789     + smap->map.map_flags = attr->map_flags;
1790     smap->n_buckets = n_buckets;
1791     smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
1792    
1793     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1794     index 2d5cc7dfee14..7c477912f36d 100644
1795     --- a/kernel/time/timer.c
1796     +++ b/kernel/time/timer.c
1797     @@ -1884,6 +1884,12 @@ int timers_dead_cpu(unsigned int cpu)
1798     spin_lock_irq(&new_base->lock);
1799     spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1800    
1801     + /*
1802     + * The current CPU's base clock might be stale. Update it
1803     + * before moving the timers over.
1804     + */
1805     + forward_timer_base(new_base);
1806     +
1807     BUG_ON(old_base->running_timer);
1808    
1809     for (i = 0; i < WHEEL_SIZE; i++)
1810     diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
1811     index 8bd569695e76..abf711112418 100644
1812     --- a/net/bridge/br_sysfs_if.c
1813     +++ b/net/bridge/br_sysfs_if.c
1814     @@ -230,6 +230,9 @@ static ssize_t brport_show(struct kobject *kobj,
1815     struct brport_attribute *brport_attr = to_brport_attr(attr);
1816     struct net_bridge_port *p = to_brport(kobj);
1817    
1818     + if (!brport_attr->show)
1819     + return -EINVAL;
1820     +
1821     return brport_attr->show(p, buf);
1822     }
1823    
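
The brport_show() guard closes a NULL-pointer dereference for write-only bridge port attributes that define .store but no .show. The shape of the fix in a self-contained sketch (hypothetical attribute struct):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

struct brport_attr {
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf);
};

static ssize_t attr_show(const struct brport_attr *attr, char *buf)
{
	if (!attr->show)		/* write-only attribute */
		return -EINVAL;
	return attr->show(buf);
}

int main(void)
{
	struct brport_attr write_only = { 0 };
	char buf[64];

	printf("%zd\n", attr_show(&write_only, buf));	/* -22, no crash */
	return 0;
}
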
1824     diff --git a/net/core/dev.c b/net/core/dev.c
1825     index 8898618bf341..272f84ad16e0 100644
1826     --- a/net/core/dev.c
1827     +++ b/net/core/dev.c
1828     @@ -2199,8 +2199,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
1829     */
1830     int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1831     {
1832     + bool disabling;
1833     int rc;
1834    
1835     + disabling = txq < dev->real_num_tx_queues;
1836     +
1837     if (txq < 1 || txq > dev->num_tx_queues)
1838     return -EINVAL;
1839    
1840     @@ -2216,15 +2219,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1841     if (dev->num_tc)
1842     netif_setup_tc(dev, txq);
1843    
1844     - if (txq < dev->real_num_tx_queues) {
1845     + dev->real_num_tx_queues = txq;
1846     +
1847     + if (disabling) {
1848     + synchronize_net();
1849     qdisc_reset_all_tx_gt(dev, txq);
1850     #ifdef CONFIG_XPS
1851     netif_reset_xps_queues_gt(dev, txq);
1852     #endif
1853     }
1854     + } else {
1855     + dev->real_num_tx_queues = txq;
1856     }
1857    
1858     - dev->real_num_tx_queues = txq;
1859     return 0;
1860     }
1861     EXPORT_SYMBOL(netif_set_real_num_tx_queues);
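
The netif_set_real_num_tx_queues() change is about publication order when shrinking: the smaller queue count must become visible, and a grace period must elapse, before the now-unused qdiscs are reset, or a concurrent sender could still pick a queue that is being torn down. The ordering, schematically (stubbed synchronization, not the kernel API):

#include <stdio.h>

static unsigned int real_num_tx_queues = 8;

static void synchronize_readers(void)
{
	/* stub: wait until all in-flight senders are done */
}

static void reset_queues_ge(unsigned int first)
{
	printf("resetting queues >= %u\n", first);
}

static void set_real_num_tx_queues(unsigned int txq)
{
	int disabling = txq < real_num_tx_queues;

	real_num_tx_queues = txq;	/* publish the new bound first */
	if (disabling) {
		synchronize_readers();	/* nobody can pick a dead queue now */
		reset_queues_ge(txq);	/* safe to tear the rest down */
	}
}

int main(void)
{
	set_real_num_tx_queues(4);
	return 0;
}
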
1862     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
1863     index 38c1c979ecb1..7e7b7a3efa99 100644
1864     --- a/net/ipv4/fib_semantics.c
1865     +++ b/net/ipv4/fib_semantics.c
1866     @@ -640,6 +640,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
1867     fi->fib_nh, cfg))
1868     return 1;
1869     }
1870     +#ifdef CONFIG_IP_ROUTE_CLASSID
1871     + if (cfg->fc_flow &&
1872     + cfg->fc_flow != fi->fib_nh->nh_tclassid)
1873     + return 1;
1874     +#endif
1875     if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
1876     (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
1877     return 0;
1878     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1879     index 7ac319222558..4c9fbf4f5905 100644
1880     --- a/net/ipv4/route.c
1881     +++ b/net/ipv4/route.c
1882     @@ -126,10 +126,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
1883     static int ip_rt_error_cost __read_mostly = HZ;
1884     static int ip_rt_error_burst __read_mostly = 5 * HZ;
1885     static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
1886     -static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
1887     +static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
1888     static int ip_rt_min_advmss __read_mostly = 256;
1889    
1890     static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
1891     +
1892     +static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
1893     +
1894     /*
1895     * Interface to generic destination cache.
1896     */
1897     @@ -2772,7 +2775,8 @@ static struct ctl_table ipv4_route_table[] = {
1898     .data = &ip_rt_min_pmtu,
1899     .maxlen = sizeof(int),
1900     .mode = 0644,
1901     - .proc_handler = proc_dointvec,
1902     + .proc_handler = proc_dointvec_minmax,
1903     + .extra1 = &ip_min_valid_pmtu,
1904     },
1905     {
1906     .procname = "min_adv_mss",
1907     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1908     index 3d7b59ecc76c..a69606031e5f 100644
1909     --- a/net/ipv4/tcp_output.c
1910     +++ b/net/ipv4/tcp_output.c
1911     @@ -1580,7 +1580,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1912     */
1913     segs = max_t(u32, bytes / mss_now, min_tso_segs);
1914    
1915     - return min_t(u32, segs, sk->sk_gso_max_segs);
1916     + return segs;
1917     }
1918     EXPORT_SYMBOL(tcp_tso_autosize);
1919    
1920     @@ -1592,8 +1592,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1921     const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1922     u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
1923    
1924     - return tso_segs ? :
1925     - tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
1926     + if (!tso_segs)
1927     + tso_segs = tcp_tso_autosize(sk, mss_now,
1928     + sysctl_tcp_min_tso_segs);
1929     + return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1930     }
1931    
1932     /* Returns the portion of skb which can be sent right away */
1933     @@ -1907,6 +1909,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
1934     }
1935     }
1936    
1937     +static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
1938     +{
1939     + struct sk_buff *skb, *next;
1940     +
1941     + skb = tcp_send_head(sk);
1942     + tcp_for_write_queue_from_safe(skb, next, sk) {
1943     + if (len <= skb->len)
1944     + break;
1945     +
1946     + if (unlikely(TCP_SKB_CB(skb)->eor))
1947     + return false;
1948     +
1949     + len -= skb->len;
1950     + }
1951     +
1952     + return true;
1953     +}
1954     +
1955     /* Create a new MTU probe if we are ready.
1956     * MTU probe is regularly attempting to increase the path MTU by
1957     * deliberately sending larger packets. This discovers routing
1958     @@ -1979,6 +1999,9 @@ static int tcp_mtu_probe(struct sock *sk)
1959     return 0;
1960     }
1961    
1962     + if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
1963     + return -1;
1964     +
1965     /* We're allowed to probe. Build it now. */
1966     nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
1967     if (!nskb)
1968     @@ -2014,6 +2037,10 @@ static int tcp_mtu_probe(struct sock *sk)
1969     /* We've eaten all the data from this skb.
1970     * Throw it away. */
1971     TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1972     + /* If this is the last SKB we copy and eor is set,
1973     + * we need to propagate it to the new skb.
1974     + */
1975     + TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
1976     tcp_unlink_write_queue(skb, sk);
1977     sk_wmem_free_skb(sk, skb);
1978     } else {
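
In the tcp_tso_segs() hunk above, the sk_gso_max_segs clamp moves out of tcp_tso_autosize() and after the congestion-control hook, so a tso_segs_goal() supplied by a CC module is bounded by the device limit as well. Reduced to its arithmetic (hypothetical values):

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* ca_goal == 0 means the congestion control has no opinion */
static unsigned int tso_segs(unsigned int ca_goal, unsigned int autosize,
			     unsigned int gso_max_segs)
{
	unsigned int segs = ca_goal ? ca_goal : autosize;

	return min_u32(segs, gso_max_segs);	/* clamp both sources */
}

int main(void)
{
	/* before the fix, a CC goal of 90 bypassed the clamp entirely */
	printf("%u\n", tso_segs(90, 0, 64));	/* 64 */
	printf("%u\n", tso_segs(0, 10, 64));	/* 10 */
	return 0;
}
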
1979     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1980     index bef4a94ce1a0..4cd943096afa 100644
1981     --- a/net/ipv4/udp.c
1982     +++ b/net/ipv4/udp.c
1983     @@ -1713,6 +1713,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1984     err = udplite_checksum_init(skb, uh);
1985     if (err)
1986     return err;
1987     +
1988     + if (UDP_SKB_CB(skb)->partial_cov) {
1989     + skb->csum = inet_compute_pseudo(skb, proto);
1990     + return 0;
1991     + }
1992     }
1993    
1994     /* Note, we are only interested in != 0 or == 0, thus the
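
For a UDP-Lite packet with partial coverage, a device checksum computed over the full payload cannot validate the packet, so the hunk above seeds skb->csum with only the pseudo-header and leaves verification of the covered bytes to software (the IPv6 hunk below is the same fix). The one's-complement arithmetic underneath, sketched with assumed field values:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit accumulator into the 16-bit one's-complement sum */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* IPv4 pseudo-header: source, destination, protocol, length */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	return sum;
}

int main(void)
{
	/* 136 = IPPROTO_UDPLITE */
	uint32_t sum = pseudo_hdr_sum(0x0a000001, 0x0a000002, 136, 20);

	printf("pseudo-header contribution: 0x%04x\n", csum_fold(sum));
	return 0;
}
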
1995     diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
1996     index c0cbcb259f5a..1dc023ca98fd 100644
1997     --- a/net/ipv6/ip6_checksum.c
1998     +++ b/net/ipv6/ip6_checksum.c
1999     @@ -72,6 +72,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
2000     err = udplite_checksum_init(skb, uh);
2001     if (err)
2002     return err;
2003     +
2004     + if (UDP_SKB_CB(skb)->partial_cov) {
2005     + skb->csum = ip6_compute_pseudo(skb, proto);
2006     + return 0;
2007     + }
2008     }
2009    
2010     /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
2011     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2012     index db6d437002a6..d4d84da28672 100644
2013     --- a/net/ipv6/sit.c
2014     +++ b/net/ipv6/sit.c
2015     @@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
2016     #ifdef CONFIG_IPV6_SIT_6RD
2017     struct ip_tunnel *t = netdev_priv(dev);
2018    
2019     - if (t->dev == sitn->fb_tunnel_dev) {
2020     + if (dev == sitn->fb_tunnel_dev) {
2021     ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
2022     t->ip6rd.relay_prefix = 0;
2023     t->ip6rd.prefixlen = 16;
2024     diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
2025     index c5a5a6959c1b..ffab94d61e1d 100644
2026     --- a/net/mpls/af_mpls.c
2027     +++ b/net/mpls/af_mpls.c
2028     @@ -7,6 +7,7 @@
2029     #include <linux/if_arp.h>
2030     #include <linux/ipv6.h>
2031     #include <linux/mpls.h>
2032     +#include <linux/nospec.h>
2033     #include <linux/vmalloc.h>
2034     #include <net/ip.h>
2035     #include <net/dst.h>
2036     @@ -756,6 +757,22 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
2037     return err;
2038     }
2039    
2040     +static bool mpls_label_ok(struct net *net, unsigned int *index)
2041     +{
2042     + bool is_ok = true;
2043     +
2044     + /* Reserved labels may not be set */
2045     + if (*index < MPLS_LABEL_FIRST_UNRESERVED)
2046     + is_ok = false;
2047     +
2048     + /* The full 20 bit range may not be supported. */
2049     + if (is_ok && *index >= net->mpls.platform_labels)
2050     + is_ok = false;
2051     +
2052     + *index = array_index_nospec(*index, net->mpls.platform_labels);
2053     + return is_ok;
2054     +}
2055     +
2056     static int mpls_route_add(struct mpls_route_config *cfg)
2057     {
2058     struct mpls_route __rcu **platform_label;
2059     @@ -774,12 +791,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
2060     index = find_free_label(net);
2061     }
2062    
2063     - /* Reserved labels may not be set */
2064     - if (index < MPLS_LABEL_FIRST_UNRESERVED)
2065     - goto errout;
2066     -
2067     - /* The full 20 bit range may not be supported. */
2068     - if (index >= net->mpls.platform_labels)
2069     + if (!mpls_label_ok(net, &index))
2070     goto errout;
2071    
2072     /* Append makes no sense with mpls */
2073     @@ -840,12 +852,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
2074    
2075     index = cfg->rc_label;
2076    
2077     - /* Reserved labels may not be removed */
2078     - if (index < MPLS_LABEL_FIRST_UNRESERVED)
2079     - goto errout;
2080     -
2081     - /* The full 20 bit range may not be supported */
2082     - if (index >= net->mpls.platform_labels)
2083     + if (!mpls_label_ok(net, &index))
2084     goto errout;
2085    
2086     mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
2087     @@ -1279,10 +1286,9 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2088     &cfg->rc_label))
2089     goto errout;
2090    
2091     - /* Reserved labels may not be set */
2092     - if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
2093     + if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
2094     + &cfg->rc_label))
2095     goto errout;
2096     -
2097     break;
2098     }
2099     case RTA_VIA:
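
mpls_label_ok() centralizes the two range checks and, importantly, sanitizes the index with array_index_nospec() so a speculatively executed out-of-bounds access through the platform_label table cannot leak data. The validate-then-clamp shape, with a simple userspace stand-in for the nospec helper (not the kernel's branch-free implementation):

#include <stdbool.h>
#include <stdio.h>

#define MPLS_LABEL_FIRST_UNRESERVED 16

/* stand-in: 0 when out of range, the index itself otherwise */
static unsigned int index_nospec(unsigned int index, unsigned int size)
{
	unsigned long mask = 0UL - (unsigned long)(index < size);

	return index & (unsigned int)mask;
}

static bool label_ok(unsigned int platform_labels, unsigned int *index)
{
	bool is_ok = *index >= MPLS_LABEL_FIRST_UNRESERVED &&
		     *index < platform_labels;

	*index = index_nospec(*index, platform_labels);
	return is_ok;
}

int main(void)
{
	unsigned int label = 100000;

	printf("ok=%d sanitized=%u\n", label_ok(1024, &label), label);
	return 0;
}
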
2100     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2101     index e1c123d4cdda..c1f59a06da6f 100644
2102     --- a/net/netlink/af_netlink.c
2103     +++ b/net/netlink/af_netlink.c
2104     @@ -2258,7 +2258,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2105     if (cb->start) {
2106     ret = cb->start(cb);
2107     if (ret)
2108     - goto error_unlock;
2109     + goto error_put;
2110     }
2111    
2112     nlk->cb_running = true;
2113     @@ -2278,6 +2278,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2114     */
2115     return -EINTR;
2116    
2117     +error_put:
2118     + module_put(control->module);
2119     error_unlock:
2120     sock_put(sk);
2121     mutex_unlock(nlk->cb_mutex);
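
The __netlink_dump_start() fix adds an error_put label so a failing cb->start() also drops the module reference taken earlier in the function; previously that reference leaked and the module could never be unloaded. The cascading-unwind pattern in miniature (stub resources):

#include <errno.h>
#include <stdio.h>

static int module_refs, sock_refs;

static int  try_module_get(void) { module_refs++; return 1; }
static void module_put(void)     { module_refs--; }
static void sock_hold(void)      { sock_refs++; }
static void sock_put(void)       { sock_refs--; }
static int  cb_start(void)       { return -ENOMEM; }	/* stub: fails */

static int dump_start(void)
{
	int ret;

	if (!try_module_get())
		return -EPROTONOSUPPORT;
	sock_hold();

	ret = cb_start();
	if (ret)
		goto error_put;	/* the fix: release the module ref too */
	return 0;

error_put:
	module_put();
	sock_put();	/* plays the role of the error_unlock tail */
	return ret;
}

int main(void)
{
	int ret = dump_start();

	printf("ret=%d module_refs=%d sock_refs=%d\n",
	       ret, module_refs, sock_refs);	/* both counts balanced */
	return 0;
}
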
2122     diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
2123     index 49c28e8ef01b..11702016c900 100644
2124     --- a/net/netlink/genetlink.c
2125     +++ b/net/netlink/genetlink.c
2126     @@ -1103,6 +1103,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
2127     {
2128     struct sk_buff *tmp;
2129     struct net *net, *prev = NULL;
2130     + bool delivered = false;
2131     int err;
2132    
2133     for_each_net_rcu(net) {
2134     @@ -1114,14 +1115,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
2135     }
2136     err = nlmsg_multicast(prev->genl_sock, tmp,
2137     portid, group, flags);
2138     - if (err)
2139     + if (!err)
2140     + delivered = true;
2141     + else if (err != -ESRCH)
2142     goto error;
2143     }
2144    
2145     prev = net;
2146     }
2147    
2148     - return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
2149     + err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
2150     + if (!err)
2151     + delivered = true;
2152     + else if (err != -ESRCH)
2153     + goto error;
2154     + return delivered ? 0 : -ESRCH;
2155     error:
2156     kfree_skb(skb);
2157     return err;
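
genlmsg_mcast() used to abort on the first nlmsg_multicast() error, so -ESRCH ("no listeners in this namespace") from one netns suppressed delivery to every later one. The fix treats -ESRCH as soft and reports success if at least one namespace was reached. The control flow, extracted:

#include <errno.h>
#include <stdio.h>

/* stub: netns 1 has no listeners, the others deliver fine */
static int multicast_to_ns(int ns)
{
	return ns == 1 ? -ESRCH : 0;
}

static int mcast_all(int nr_ns)
{
	int err, delivered = 0;

	for (int ns = 0; ns < nr_ns; ns++) {
		err = multicast_to_ns(ns);
		if (!err)
			delivered = 1;
		else if (err != -ESRCH)
			return err;	/* hard error: abort */
	}
	return delivered ? 0 : -ESRCH;
}

int main(void)
{
	printf("%d\n", mcast_all(3));	/* 0: ns 0 and 2 were reached */
	return 0;
}
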
2158     diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
2159     index 5dab1ff3a6c2..59d328603312 100644
2160     --- a/net/rxrpc/output.c
2161     +++ b/net/rxrpc/output.c
2162     @@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
2163     (char *)&opt, sizeof(opt));
2164     if (ret == 0) {
2165     ret = kernel_sendmsg(conn->params.local->socket, &msg,
2166     - iov, 1, iov[0].iov_len);
2167     + iov, 2, len);
2168    
2169     opt = IPV6_PMTUDISC_DO;
2170     kernel_setsockopt(conn->params.local->socket,
2171     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2172     index 5d015270e454..11f69d4c5619 100644
2173     --- a/net/sctp/ipv6.c
2174     +++ b/net/sctp/ipv6.c
2175     @@ -324,8 +324,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2176     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
2177     bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
2178    
2179     - if (!IS_ERR(bdst) &&
2180     - ipv6_chk_addr(dev_net(bdst->dev),
2181     + if (IS_ERR(bdst))
2182     + continue;
2183     +
2184     + if (ipv6_chk_addr(dev_net(bdst->dev),
2185     &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
2186     if (!IS_ERR_OR_NULL(dst))
2187     dst_release(dst);
2188     @@ -334,8 +336,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2189     }
2190    
2191     bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
2192     - if (matchlen > bmatchlen)
2193     + if (matchlen > bmatchlen) {
2194     + dst_release(bdst);
2195     continue;
2196     + }
2197    
2198     if (!IS_ERR_OR_NULL(dst))
2199     dst_release(dst);
2200     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2201     index 7b523e3f551f..fb7b7632316a 100644
2202     --- a/net/sctp/protocol.c
2203     +++ b/net/sctp/protocol.c
2204     @@ -510,22 +510,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2205     if (IS_ERR(rt))
2206     continue;
2207    
2208     - if (!dst)
2209     - dst = &rt->dst;
2210     -
2211     /* Ensure the src address belongs to the output
2212     * interface.
2213     */
2214     odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
2215     false);
2216     if (!odev || odev->ifindex != fl4->flowi4_oif) {
2217     - if (&rt->dst != dst)
2218     + if (!dst)
2219     + dst = &rt->dst;
2220     + else
2221     dst_release(&rt->dst);
2222     continue;
2223     }
2224    
2225     - if (dst != &rt->dst)
2226     - dst_release(dst);
2227     + dst_release(dst);
2228     dst = &rt->dst;
2229     break;
2230     }
2231     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
2232     index 9e9690b7afe1..fc67d356b5fa 100644
2233     --- a/net/sctp/sm_make_chunk.c
2234     +++ b/net/sctp/sm_make_chunk.c
2235     @@ -1373,9 +1373,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
2236     sctp_chunkhdr_t *chunk_hdr;
2237     struct sk_buff *skb;
2238     struct sock *sk;
2239     + int chunklen;
2240     +
2241     + chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
2242     + if (chunklen > SCTP_MAX_CHUNK_LEN)
2243     + goto nodata;
2244    
2245     /* No need to allocate LL here, as this is only a chunk. */
2246     - skb = alloc_skb(SCTP_PAD4(sizeof(sctp_chunkhdr_t) + paylen), gfp);
2247     + skb = alloc_skb(chunklen, gfp);
2248     if (!skb)
2249     goto nodata;
2250    
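
_sctp_make_chunk() now bounds the padded length before allocating, since the on-wire chunk header carries only a 16-bit length; an oversized paylen would wrap when stored there. A sketch of the check, with the bound approximated as the u16 ceiling (the kernel's actual SCTP_MAX_CHUNK_LEN is derived differently):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SCTP_PAD4(x)	(((x) + 3) & ~3U)
#define MAX_CHUNK_LEN	65535U		/* assumed: u16 length ceiling */

struct chunkhdr { uint8_t type, flags; uint16_t length; };

static int make_chunk(unsigned int paylen)
{
	unsigned int chunklen = SCTP_PAD4(sizeof(struct chunkhdr) + paylen);

	if (chunklen > MAX_CHUNK_LEN)
		return -EINVAL;	/* would overflow the length field */
	/* ... allocate chunklen bytes and build the chunk ... */
	return 0;
}

int main(void)
{
	printf("%d %d\n", make_chunk(1000), make_chunk(70000));
	return 0;
}
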
2251     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2252     index 293f3f213776..ceb162a9dcfd 100644
2253     --- a/sound/pci/hda/hda_intel.c
2254     +++ b/sound/pci/hda/hda_intel.c
2255     @@ -180,7 +180,7 @@ static const struct kernel_param_ops param_ops_xint = {
2256     };
2257     #define param_check_xint param_check_int
2258    
2259     -static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
2260     +static int power_save = -1;
2261     module_param(power_save, xint, 0644);
2262     MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
2263     "(in seconds, 0 = disable).");
2264     @@ -2042,6 +2042,24 @@ static int azx_probe(struct pci_dev *pci,
2265     return err;
2266     }
2267    
2268     +#ifdef CONFIG_PM
2269     +/* On some boards setting power_save to a non-zero value leads to clicking /
2270     + * popping sounds whenever we enter/leave powersaving mode. Ideally we would
2271     + * figure out how to avoid these sounds, but that is not always feasible.
2272     + * So we keep a list of devices where we disable powersaving, as it is known
2273     + * to cause problems on these devices.
2274     + */
2275     +static struct snd_pci_quirk power_save_blacklist[] = {
2276     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2277     + SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
2278     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2279     + SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
2280     + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
2281     + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
2282     + {}
2283     +};
2284     +#endif /* CONFIG_PM */
2285     +
2286     /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
2287     static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
2288     [AZX_DRIVER_NVIDIA] = 8,
2289     @@ -2054,6 +2072,7 @@ static int azx_probe_continue(struct azx *chip)
2290     struct hdac_bus *bus = azx_bus(chip);
2291     struct pci_dev *pci = chip->pci;
2292     int dev = chip->dev_index;
2293     + int val;
2294     int err;
2295    
2296     hda->probe_continued = 1;
2297     @@ -2129,7 +2148,22 @@ static int azx_probe_continue(struct azx *chip)
2298    
2299     chip->running = 1;
2300     azx_add_card_list(chip);
2301     - snd_hda_set_power_save(&chip->bus, power_save * 1000);
2302     +
2303     + val = power_save;
2304     +#ifdef CONFIG_PM
2305     + if (val == -1) {
2306     + const struct snd_pci_quirk *q;
2307     +
2308     + val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
2309     + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
2310     + if (q && val) {
2311     + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
2312     + q->subvendor, q->subdevice);
2313     + val = 0;
2314     + }
2315     + }
2316     +#endif /* CONFIG_PM */
2317     + snd_hda_set_power_save(&chip->bus, val * 1000);
2318     if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
2319     pm_runtime_put_autosuspend(&pci->dev);
2320    
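
With the module parameter defaulting to -1, azx_probe_continue() resolves the effective power_save value at probe time: start from the Kconfig default, then force 0 when the PCI subsystem IDs match the blacklist. A reduced model of the quirk lookup (table entries taken from the patch; the default value is assumed):

#include <stdio.h>

struct pci_quirk { unsigned short subvendor, subdevice; const char *name; };

static const struct pci_quirk power_save_blacklist[] = {
	{ 0x1849, 0x0c0c, "Asrock B85M-ITX" },
	{ 0x1043, 0x8733, "Asus Prime X370-Pro" },
	{ 0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen" },
	{ 0, 0, NULL }				/* sentinel */
};

static const struct pci_quirk *quirk_lookup(unsigned short sv,
					    unsigned short sd)
{
	const struct pci_quirk *q;

	for (q = power_save_blacklist; q->subvendor; q++)
		if (q->subvendor == sv && q->subdevice == sd)
			return q;
	return NULL;
}

int main(void)
{
	int val = 10;	/* assumed CONFIG_SND_HDA_POWER_SAVE_DEFAULT */
	const struct pci_quirk *q = quirk_lookup(0x1849, 0x0c0c);

	if (q && val) {
		printf("%s is blacklisted, forcing power_save to 0\n",
		       q->name);
		val = 0;
	}
	printf("effective power_save: %d\n", val);
	return 0;
}

Keeping -1 as the "unset" marker means an explicit power_save= on the command line still overrides the blacklist.
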
2321     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2322     index 89c166b97e81..974b74e91ef0 100644
2323     --- a/sound/pci/hda/patch_realtek.c
2324     +++ b/sound/pci/hda/patch_realtek.c
2325     @@ -4480,13 +4480,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
2326    
2327     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2328     spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2329     + snd_hda_apply_pincfgs(codec, pincfgs);
2330     + } else if (action == HDA_FIXUP_ACT_INIT) {
2331     /* Enable DOCK device */
2332     snd_hda_codec_write(codec, 0x17, 0,
2333     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
2334     /* Enable DOCK device */
2335     snd_hda_codec_write(codec, 0x19, 0,
2336     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
2337     - snd_hda_apply_pincfgs(codec, pincfgs);
2338     }
2339     }
2340    
2341     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2342     index 8a59d4782a0f..69bf5cf1e91e 100644
2343     --- a/sound/usb/quirks-table.h
2344     +++ b/sound/usb/quirks-table.h
2345     @@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2346     }
2347     },
2348    
2349     +{
2350     + /*
2351     + * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
2352     + * even though they advertise more. The capture interface doesn't work
2353     + * even on Windows.
2354     + */
2355     + USB_DEVICE(0x19b5, 0x0021),
2356     + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
2357     + .ifnum = QUIRK_ANY_INTERFACE,
2358     + .type = QUIRK_COMPOSITE,
2359     + .data = (const struct snd_usb_audio_quirk[]) {
2360     + {
2361     + .ifnum = 0,
2362     + .type = QUIRK_AUDIO_STANDARD_MIXER,
2363     + },
2364     + /* Capture */
2365     + {
2366     + .ifnum = 1,
2367     + .type = QUIRK_IGNORE_INTERFACE,
2368     + },
2369     + /* Playback */
2370     + {
2371     + .ifnum = 2,
2372     + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
2373     + .data = &(const struct audioformat) {
2374     + .formats = SNDRV_PCM_FMTBIT_S16_LE,
2375     + .channels = 2,
2376     + .iface = 2,
2377     + .altsetting = 1,
2378     + .altset_idx = 1,
2379     + .attributes = UAC_EP_CS_ATTR_FILL_MAX |
2380     + UAC_EP_CS_ATTR_SAMPLE_RATE,
2381     + .endpoint = 0x03,
2382     + .ep_attr = USB_ENDPOINT_XFER_ISOC,
2383     + .rates = SNDRV_PCM_RATE_48000,
2384     + .rate_min = 48000,
2385     + .rate_max = 48000,
2386     + .nr_rates = 1,
2387     + .rate_table = (unsigned int[]) {
2388     + 48000
2389     + }
2390     + }
2391     + },
2392     + }
2393     + }
2394     +},
2395     +
2396     #undef USB_DEVICE_VENDOR_SPEC
2397     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2398     index 1b20768e781d..eaae7252f60c 100644
2399     --- a/virt/kvm/kvm_main.c
2400     +++ b/virt/kvm/kvm_main.c
2401     @@ -976,8 +976,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
2402     /* Check for overlaps */
2403     r = -EEXIST;
2404     kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
2405     - if ((slot->id >= KVM_USER_MEM_SLOTS) ||
2406     - (slot->id == id))
2407     + if (slot->id == id)
2408     continue;
2409     if (!((base_gfn + npages <= slot->base_gfn) ||
2410     (base_gfn >= slot->base_gfn + slot->npages)))
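
Finally, the __kvm_set_memory_region() change stops exempting slots with id >= KVM_USER_MEM_SLOTS from the overlap scan, so internal memslots are checked too; only the slot currently being modified remains excluded. The interval test itself is the usual half-open-range check:

#include <stdbool.h>
#include <stdio.h>

/* [base1, base1+n1) and [base2, base2+n2) overlap unless one ends
 * at or before the point where the other begins */
static bool gfn_ranges_overlap(unsigned long base1, unsigned long n1,
			       unsigned long base2, unsigned long n2)
{
	return !(base1 + n1 <= base2 || base1 >= base2 + n2);
}

int main(void)
{
	printf("%d\n", gfn_ranges_overlap(0x100, 0x40, 0x120, 0x10)); /* 1 */
	printf("%d\n", gfn_ranges_overlap(0x100, 0x20, 0x120, 0x10)); /* 0 */
	return 0;
}
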