Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.15/0107-4.15.8-all-fixes.patch

Parent Directory | Revision Log


Revision 3091 - (hide annotations) (download)
Wed Mar 21 14:52:42 2018 UTC (6 years, 1 month ago) by niro
File size: 158336 byte(s)
-linux-4.15.8
1 niro 3091 diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
2     index 46c7e1085efc..e269541a7d10 100644
3     --- a/Documentation/networking/ip-sysctl.txt
4     +++ b/Documentation/networking/ip-sysctl.txt
5     @@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
6     min: Minimal size of receive buffer used by TCP sockets.
7     It is guaranteed to each TCP socket, even under moderate memory
8     pressure.
9     - Default: 1 page
10     + Default: 4K
11    
12     default: initial size of receive buffer used by TCP sockets.
13     This value overrides net.core.rmem_default used by other protocols.
14     @@ -666,7 +666,7 @@ tcp_window_scaling - BOOLEAN
15     tcp_wmem - vector of 3 INTEGERs: min, default, max
16     min: Amount of memory reserved for send buffers for TCP sockets.
17     Each TCP socket has rights to use it due to fact of its birth.
18     - Default: 1 page
19     + Default: 4K
20    
21     default: initial size of send buffer used by TCP sockets. This
22     value overrides net.core.wmem_default used by other protocols.
23     diff --git a/Makefile b/Makefile
24     index 49f524444050..eb18d200a603 100644
25     --- a/Makefile
26     +++ b/Makefile
27     @@ -1,7 +1,7 @@
28     # SPDX-License-Identifier: GPL-2.0
29     VERSION = 4
30     PATCHLEVEL = 15
31     -SUBLEVEL = 7
32     +SUBLEVEL = 8
33     EXTRAVERSION =
34     NAME = Fearless Coyote
35    
36     diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
37     index 29cb804d10cc..06cce72508a2 100644
38     --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
39     +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
40     @@ -98,6 +98,8 @@
41     };
42    
43     &i2c1 {
44     + pinctrl-names = "default";
45     + pinctrl-0 = <&i2c1_pins>;
46     clock-frequency = <2600000>;
47    
48     twl: twl@48 {
49     @@ -216,7 +218,12 @@
50     >;
51     };
52    
53     -
54     + i2c1_pins: pinmux_i2c1_pins {
55     + pinctrl-single,pins = <
56     + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
57     + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
58     + >;
59     + };
60     };
61    
62     &omap3_pmx_wkup {
63     diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
64     index 6d89736c7b44..cf22b35f0a28 100644
65     --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
66     +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
67     @@ -104,6 +104,8 @@
68     };
69    
70     &i2c1 {
71     + pinctrl-names = "default";
72     + pinctrl-0 = <&i2c1_pins>;
73     clock-frequency = <2600000>;
74    
75     twl: twl@48 {
76     @@ -211,6 +213,12 @@
77     OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
78     >;
79     };
80     + i2c1_pins: pinmux_i2c1_pins {
81     + pinctrl-single,pins = <
82     + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
83     + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
84     + >;
85     + };
86     };
87    
88     &uart2 {
89     diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
90     index 99cfae875e12..5eae4776ffde 100644
91     --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
92     +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
93     @@ -110,26 +110,6 @@
94     };
95     };
96    
97     -&cpu0 {
98     - cpu0-supply = <&vdd_cpu>;
99     - operating-points = <
100     - /* KHz uV */
101     - 1800000 1400000
102     - 1608000 1350000
103     - 1512000 1300000
104     - 1416000 1200000
105     - 1200000 1100000
106     - 1008000 1050000
107     - 816000 1000000
108     - 696000 950000
109     - 600000 900000
110     - 408000 900000
111     - 312000 900000
112     - 216000 900000
113     - 126000 900000
114     - >;
115     -};
116     -
117     &emmc {
118     status = "okay";
119     bus-width = <8>;
120     diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
121     index 5638ce0c9524..63d6b404d88e 100644
122     --- a/arch/arm/kvm/hyp/Makefile
123     +++ b/arch/arm/kvm/hyp/Makefile
124     @@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
125    
126     KVM=../../../../virt/kvm
127    
128     +CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
129     +
130     obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
131     obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
132     obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
133     @@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
134     obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
135     obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
136     obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
137     +CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
138     +
139     obj-$(CONFIG_KVM_ARM_HOST) += entry.o
140     obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
141     obj-$(CONFIG_KVM_ARM_HOST) += switch.o
142     +CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
143     obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
144     diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
145     index 111bda8cdebd..be4b8b0a40ad 100644
146     --- a/arch/arm/kvm/hyp/banked-sr.c
147     +++ b/arch/arm/kvm/hyp/banked-sr.c
148     @@ -20,6 +20,10 @@
149    
150     #include <asm/kvm_hyp.h>
151    
152     +/*
153     + * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
154     + * trick the assembler.
155     + */
156     __asm__(".arch_extension virt");
157    
158     void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
159     diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
160     index 9b49867154bf..63fa79f9f121 100644
161     --- a/arch/arm/mach-mvebu/Kconfig
162     +++ b/arch/arm/mach-mvebu/Kconfig
163     @@ -42,7 +42,7 @@ config MACH_ARMADA_375
164     depends on ARCH_MULTI_V7
165     select ARMADA_370_XP_IRQ
166     select ARM_ERRATA_720789
167     - select ARM_ERRATA_753970
168     + select PL310_ERRATA_753970
169     select ARM_GIC
170     select ARMADA_375_CLK
171     select HAVE_ARM_SCU
172     @@ -58,7 +58,7 @@ config MACH_ARMADA_38X
173     bool "Marvell Armada 380/385 boards"
174     depends on ARCH_MULTI_V7
175     select ARM_ERRATA_720789
176     - select ARM_ERRATA_753970
177     + select PL310_ERRATA_753970
178     select ARM_GIC
179     select ARM_GLOBAL_TIMER
180     select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
181     diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
182     index aff6994950ba..a2399fd66e97 100644
183     --- a/arch/arm/plat-orion/common.c
184     +++ b/arch/arm/plat-orion/common.c
185     @@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
186     /*****************************************************************************
187     * Ethernet switch
188     ****************************************************************************/
189     -static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
190     -static __initdata struct mdio_board_info
191     - orion_ge00_switch_board_info;
192     +static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
193     + .bus_id = "orion-mii",
194     + .modalias = "mv88e6085",
195     +};
196    
197     void __init orion_ge00_switch_init(struct dsa_chip_data *d)
198     {
199     - struct mdio_board_info *bd;
200     unsigned int i;
201    
202     if (!IS_BUILTIN(CONFIG_PHYLIB))
203     return;
204    
205     - for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
206     - if (!strcmp(d->port_names[i], "cpu"))
207     + for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
208     + if (!strcmp(d->port_names[i], "cpu")) {
209     + d->netdev[i] = &orion_ge00.dev;
210     break;
211     + }
212     + }
213    
214     - bd = &orion_ge00_switch_board_info;
215     - bd->bus_id = orion_ge00_mvmdio_bus_name;
216     - bd->mdio_addr = d->sw_addr;
217     - d->netdev[i] = &orion_ge00.dev;
218     - strcpy(bd->modalias, "mv88e6085");
219     - bd->platform_data = d;
220     + orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
221     + orion_ge00_switch_board_info.platform_data = d;
222    
223     mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
224     }
225     diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
226     index 3742508cc534..bd5ce31936f5 100644
227     --- a/arch/parisc/include/asm/cacheflush.h
228     +++ b/arch/parisc/include/asm/cacheflush.h
229     @@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
230     void flush_kernel_icache_range_asm(unsigned long, unsigned long);
231     void flush_user_dcache_range_asm(unsigned long, unsigned long);
232     void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
233     +void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
234     void flush_kernel_dcache_page_asm(void *);
235     void flush_kernel_icache_page(void *);
236    
237     diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
238     index 0e6ab6e4a4e9..2dbe5580a1a4 100644
239     --- a/arch/parisc/include/asm/processor.h
240     +++ b/arch/parisc/include/asm/processor.h
241     @@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
242     #define parisc_requires_coherency() (0)
243     #endif
244    
245     +extern int running_on_qemu;
246     +
247     #endif /* __ASSEMBLY__ */
248    
249     #endif /* __ASM_PARISC_PROCESSOR_H */
250     diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
251     index 19c0c141bc3f..79089778725b 100644
252     --- a/arch/parisc/kernel/cache.c
253     +++ b/arch/parisc/kernel/cache.c
254     @@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
255     int __flush_tlb_range(unsigned long sid, unsigned long start,
256     unsigned long end)
257     {
258     - unsigned long flags, size;
259     + unsigned long flags;
260    
261     - size = (end - start);
262     - if (size >= parisc_tlb_flush_threshold) {
263     + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
264     + end - start >= parisc_tlb_flush_threshold) {
265     flush_tlb_all();
266     return 1;
267     }
268     @@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
269     struct vm_area_struct *vma;
270     pgd_t *pgd;
271    
272     - /* Flush the TLB to avoid speculation if coherency is required. */
273     - if (parisc_requires_coherency())
274     - flush_tlb_all();
275     -
276     /* Flushing the whole cache on each cpu takes forever on
277     rp3440, etc. So, avoid it if the mm isn't too big. */
278     - if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
279     + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
280     + mm_total_size(mm) >= parisc_cache_flush_threshold) {
281     + flush_tlb_all();
282     flush_cache_all();
283     return;
284     }
285     @@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
286     if (mm->context == mfsp(3)) {
287     for (vma = mm->mmap; vma; vma = vma->vm_next) {
288     flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
289     - if ((vma->vm_flags & VM_EXEC) == 0)
290     - continue;
291     - flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
292     + if (vma->vm_flags & VM_EXEC)
293     + flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
294     + flush_tlb_range(vma, vma->vm_start, vma->vm_end);
295     }
296     return;
297     }
298     @@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
299     void flush_cache_range(struct vm_area_struct *vma,
300     unsigned long start, unsigned long end)
301     {
302     - BUG_ON(!vma->vm_mm->context);
303     -
304     - /* Flush the TLB to avoid speculation if coherency is required. */
305     - if (parisc_requires_coherency())
306     + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
307     + end - start >= parisc_cache_flush_threshold) {
308     flush_tlb_range(vma, start, end);
309     -
310     - if ((end - start) >= parisc_cache_flush_threshold
311     - || vma->vm_mm->context != mfsp(3)) {
312     flush_cache_all();
313     return;
314     }
315     @@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
316     flush_user_dcache_range_asm(start, end);
317     if (vma->vm_flags & VM_EXEC)
318     flush_user_icache_range_asm(start, end);
319     + flush_tlb_range(vma, start, end);
320     }
321    
322     void
323     @@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
324     BUG_ON(!vma->vm_mm->context);
325    
326     if (pfn_valid(pfn)) {
327     - if (parisc_requires_coherency())
328     - flush_tlb_page(vma, vmaddr);
329     + flush_tlb_page(vma, vmaddr);
330     __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
331     }
332     }
333     @@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
334     void flush_kernel_vmap_range(void *vaddr, int size)
335     {
336     unsigned long start = (unsigned long)vaddr;
337     + unsigned long end = start + size;
338    
339     - if ((unsigned long)size > parisc_cache_flush_threshold)
340     + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
341     + (unsigned long)size >= parisc_cache_flush_threshold) {
342     + flush_tlb_kernel_range(start, end);
343     flush_data_cache();
344     - else
345     - flush_kernel_dcache_range_asm(start, start + size);
346     + return;
347     + }
348     +
349     + flush_kernel_dcache_range_asm(start, end);
350     + flush_tlb_kernel_range(start, end);
351     }
352     EXPORT_SYMBOL(flush_kernel_vmap_range);
353    
354     void invalidate_kernel_vmap_range(void *vaddr, int size)
355     {
356     unsigned long start = (unsigned long)vaddr;
357     + unsigned long end = start + size;
358    
359     - if ((unsigned long)size > parisc_cache_flush_threshold)
360     + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
361     + (unsigned long)size >= parisc_cache_flush_threshold) {
362     + flush_tlb_kernel_range(start, end);
363     flush_data_cache();
364     - else
365     - flush_kernel_dcache_range_asm(start, start + size);
366     + return;
367     + }
368     +
369     + purge_kernel_dcache_range_asm(start, end);
370     + flush_tlb_kernel_range(start, end);
371     }
372     EXPORT_SYMBOL(invalidate_kernel_vmap_range);
373     diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
374     index 2d40c4ff3f69..67b0f7532e83 100644
375     --- a/arch/parisc/kernel/pacache.S
376     +++ b/arch/parisc/kernel/pacache.S
377     @@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
378     .procend
379     ENDPROC_CFI(flush_kernel_dcache_range_asm)
380    
381     +ENTRY_CFI(purge_kernel_dcache_range_asm)
382     + .proc
383     + .callinfo NO_CALLS
384     + .entry
385     +
386     + ldil L%dcache_stride, %r1
387     + ldw R%dcache_stride(%r1), %r23
388     + ldo -1(%r23), %r21
389     + ANDCM %r26, %r21, %r26
390     +
391     +1: cmpb,COND(<<),n %r26, %r25,1b
392     + pdc,m %r23(%r26)
393     +
394     + sync
395     + syncdma
396     + bv %r0(%r2)
397     + nop
398     + .exit
399     +
400     + .procend
401     +ENDPROC_CFI(purge_kernel_dcache_range_asm)
402     +
403     ENTRY_CFI(flush_user_icache_range_asm)
404     .proc
405     .callinfo NO_CALLS
406     diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
407     index 4b8fd6dc22da..f7e684560186 100644
408     --- a/arch/parisc/kernel/time.c
409     +++ b/arch/parisc/kernel/time.c
410     @@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
411     next_tick = cpuinfo->it_value;
412    
413     /* Calculate how many ticks have elapsed. */
414     + now = mfctl(16);
415     do {
416     ++ticks_elapsed;
417     next_tick += cpt;
418     - now = mfctl(16);
419     } while (next_tick - now > cpt);
420    
421     /* Store (in CR16 cycles) up to when we are accounting right now. */
422     @@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
423     * if one or the other wrapped. If "now" is "bigger" we'll end up
424     * with a very large unsigned number.
425     */
426     - while (next_tick - mfctl(16) > cpt)
427     + now = mfctl(16);
428     + while (next_tick - now > cpt)
429     next_tick += cpt;
430    
431     /* Program the IT when to deliver the next interrupt.
432     * Only bottom 32-bits of next_tick are writable in CR16!
433     * Timer interrupt will be delivered at least a few hundred cycles
434     - * after the IT fires, so if we are too close (<= 500 cycles) to the
435     + * after the IT fires, so if we are too close (<= 8000 cycles) to the
436     * next cycle, simply skip it.
437     */
438     - if (next_tick - mfctl(16) <= 500)
439     + if (next_tick - now <= 8000)
440     next_tick += cpt;
441     mtctl(next_tick, 16);
442    
443     @@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
444     * different sockets, so mark them unstable and lower rating on
445     * multi-socket SMP systems.
446     */
447     - if (num_online_cpus() > 1) {
448     + if (num_online_cpus() > 1 && !running_on_qemu) {
449     int cpu;
450     unsigned long cpu0_loc;
451     cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
452     diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
453     index 48f41399fc0b..cab32ee824d2 100644
454     --- a/arch/parisc/mm/init.c
455     +++ b/arch/parisc/mm/init.c
456     @@ -629,7 +629,12 @@ void __init mem_init(void)
457     #endif
458    
459     mem_init_print_info(NULL);
460     -#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
461     +
462     +#if 0
463     + /*
464     + * Do not expose the virtual kernel memory layout to userspace.
465     + * But keep code for debugging purposes.
466     + */
467     printk("virtual kernel memory layout:\n"
468     " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
469     " memory : 0x%px - 0x%px (%4ld MB)\n"
470     diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
471     index 17ae5c15a9e0..804ba030d859 100644
472     --- a/arch/powerpc/mm/pgtable-radix.c
473     +++ b/arch/powerpc/mm/pgtable-radix.c
474     @@ -21,6 +21,7 @@
475    
476     #include <asm/pgtable.h>
477     #include <asm/pgalloc.h>
478     +#include <asm/mmu_context.h>
479     #include <asm/dma.h>
480     #include <asm/machdep.h>
481     #include <asm/mmu.h>
482     @@ -334,6 +335,22 @@ static void __init radix_init_pgtable(void)
483     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
484     asm volatile("eieio; tlbsync; ptesync" : : : "memory");
485     trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
486     +
487     + /*
488     + * The init_mm context is given the first available (non-zero) PID,
489     + * which is the "guard PID" and contains no page table. PIDR should
490     + * never be set to zero because that duplicates the kernel address
491     + * space at the 0x0... offset (quadrant 0)!
492     + *
493     + * An arbitrary PID that may later be allocated by the PID allocator
494     + * for userspace processes must not be used either, because that
495     + * would cause stale user mappings for that PID on CPUs outside of
496     + * the TLB invalidation scheme (because it won't be in mm_cpumask).
497     + *
498     + * So permanently carve out one PID for the purpose of a guard PID.
499     + */
500     + init_mm.context.id = mmu_base_pid;
501     + mmu_base_pid++;
502     }
503    
504     static void __init radix_init_partition_table(void)
505     @@ -580,6 +597,8 @@ void __init radix__early_init_mmu(void)
506    
507     radix_init_iamr();
508     radix_init_pgtable();
509     + /* Switch to the guard PID before turning on MMU */
510     + radix__switch_mmu_context(NULL, &init_mm);
511     }
512    
513     void radix__early_init_mmu_secondary(void)
514     @@ -601,6 +620,7 @@ void radix__early_init_mmu_secondary(void)
515     radix_init_amor();
516     }
517     radix_init_iamr();
518     + radix__switch_mmu_context(NULL, &init_mm);
519     }
520    
521     void radix__mmu_cleanup_all(void)
522     diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
523     index 81d8614e7379..5e1ef9150182 100644
524     --- a/arch/powerpc/platforms/pseries/ras.c
525     +++ b/arch/powerpc/platforms/pseries/ras.c
526     @@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
527     static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
528    
529    
530     +/*
531     + * Enable the hotplug interrupt late because processing them may touch other
532     + * devices or systems (e.g. hugepages) that have not been initialized at the
533     + * subsys stage.
534     + */
535     +int __init init_ras_hotplug_IRQ(void)
536     +{
537     + struct device_node *np;
538     +
539     + /* Hotplug Events */
540     + np = of_find_node_by_path("/event-sources/hot-plug-events");
541     + if (np != NULL) {
542     + if (dlpar_workqueue_init() == 0)
543     + request_event_sources_irqs(np, ras_hotplug_interrupt,
544     + "RAS_HOTPLUG");
545     + of_node_put(np);
546     + }
547     +
548     + return 0;
549     +}
550     +machine_late_initcall(pseries, init_ras_hotplug_IRQ);
551     +
552     /*
553     * Initialize handlers for the set of interrupts caused by hardware errors
554     * and power system events.
555     @@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
556     of_node_put(np);
557     }
558    
559     - /* Hotplug Events */
560     - np = of_find_node_by_path("/event-sources/hot-plug-events");
561     - if (np != NULL) {
562     - if (dlpar_workqueue_init() == 0)
563     - request_event_sources_irqs(np, ras_hotplug_interrupt,
564     - "RAS_HOTPLUG");
565     - of_node_put(np);
566     - }
567     -
568     /* EPOW Events */
569     np = of_find_node_by_path("/event-sources/epow-events");
570     if (np != NULL) {
571     diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
572     index 024ad8bcc516..5b8089b0d3ee 100644
573     --- a/arch/s390/kvm/interrupt.c
574     +++ b/arch/s390/kvm/interrupt.c
575     @@ -170,8 +170,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
576    
577     static int ckc_irq_pending(struct kvm_vcpu *vcpu)
578     {
579     - if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
580     + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
581     + const u64 ckc = vcpu->arch.sie_block->ckc;
582     +
583     + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
584     + if ((s64)ckc >= (s64)now)
585     + return 0;
586     + } else if (ckc >= now) {
587     return 0;
588     + }
589     return ckc_interrupts_enabled(vcpu);
590     }
591    
592     @@ -1011,13 +1018,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
593    
594     static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
595     {
596     - u64 now, cputm, sltime = 0;
597     + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
598     + const u64 ckc = vcpu->arch.sie_block->ckc;
599     + u64 cputm, sltime = 0;
600    
601     if (ckc_interrupts_enabled(vcpu)) {
602     - now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
603     - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
604     - /* already expired or overflow? */
605     - if (!sltime || vcpu->arch.sie_block->ckc <= now)
606     + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
607     + if ((s64)now < (s64)ckc)
608     + sltime = tod_to_ns((s64)ckc - (s64)now);
609     + } else if (now < ckc) {
610     + sltime = tod_to_ns(ckc - now);
611     + }
612     + /* already expired */
613     + if (!sltime)
614     return 0;
615     if (cpu_timer_interrupts_enabled(vcpu)) {
616     cputm = kvm_s390_get_cpu_timer(vcpu);
617     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
618     index 1371dff2b90d..5c03e371b7b8 100644
619     --- a/arch/s390/kvm/kvm-s390.c
620     +++ b/arch/s390/kvm/kvm-s390.c
621     @@ -166,6 +166,28 @@ int kvm_arch_hardware_enable(void)
622     static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
623     unsigned long end);
624    
625     +static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
626     +{
627     + u8 delta_idx = 0;
628     +
629     + /*
630     + * The TOD jumps by delta, we have to compensate this by adding
631     + * -delta to the epoch.
632     + */
633     + delta = -delta;
634     +
635     + /* sign-extension - we're adding to signed values below */
636     + if ((s64)delta < 0)
637     + delta_idx = -1;
638     +
639     + scb->epoch += delta;
640     + if (scb->ecd & ECD_MEF) {
641     + scb->epdx += delta_idx;
642     + if (scb->epoch < delta)
643     + scb->epdx += 1;
644     + }
645     +}
646     +
647     /*
648     * This callback is executed during stop_machine(). All CPUs are therefore
649     * temporarily stopped. In order not to change guest behavior, we have to
650     @@ -181,13 +203,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
651     unsigned long long *delta = v;
652    
653     list_for_each_entry(kvm, &vm_list, vm_list) {
654     - kvm->arch.epoch -= *delta;
655     kvm_for_each_vcpu(i, vcpu, kvm) {
656     - vcpu->arch.sie_block->epoch -= *delta;
657     + kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
658     + if (i == 0) {
659     + kvm->arch.epoch = vcpu->arch.sie_block->epoch;
660     + kvm->arch.epdx = vcpu->arch.sie_block->epdx;
661     + }
662     if (vcpu->arch.cputm_enabled)
663     vcpu->arch.cputm_start += *delta;
664     if (vcpu->arch.vsie_block)
665     - vcpu->arch.vsie_block->epoch -= *delta;
666     + kvm_clock_sync_scb(vcpu->arch.vsie_block,
667     + *delta);
668     }
669     }
670     return NOTIFY_OK;
671     @@ -889,12 +915,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
672     if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
673     return -EFAULT;
674    
675     - if (test_kvm_facility(kvm, 139))
676     - kvm_s390_set_tod_clock_ext(kvm, &gtod);
677     - else if (gtod.epoch_idx == 0)
678     - kvm_s390_set_tod_clock(kvm, gtod.tod);
679     - else
680     + if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
681     return -EINVAL;
682     + kvm_s390_set_tod_clock(kvm, &gtod);
683    
684     VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
685     gtod.epoch_idx, gtod.tod);
686     @@ -919,13 +942,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
687    
688     static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
689     {
690     - u64 gtod;
691     + struct kvm_s390_vm_tod_clock gtod = { 0 };
692    
693     - if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
694     + if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
695     + sizeof(gtod.tod)))
696     return -EFAULT;
697    
698     - kvm_s390_set_tod_clock(kvm, gtod);
699     - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
700     + kvm_s390_set_tod_clock(kvm, &gtod);
701     + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
702     return 0;
703     }
704    
705     @@ -2361,6 +2385,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
706     mutex_lock(&vcpu->kvm->lock);
707     preempt_disable();
708     vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
709     + vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
710     preempt_enable();
711     mutex_unlock(&vcpu->kvm->lock);
712     if (!kvm_is_ucontrol(vcpu->kvm)) {
713     @@ -2947,8 +2972,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
714     return 0;
715     }
716    
717     -void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
718     - const struct kvm_s390_vm_tod_clock *gtod)
719     +void kvm_s390_set_tod_clock(struct kvm *kvm,
720     + const struct kvm_s390_vm_tod_clock *gtod)
721     {
722     struct kvm_vcpu *vcpu;
723     struct kvm_s390_tod_clock_ext htod;
724     @@ -2960,10 +2985,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
725     get_tod_clock_ext((char *)&htod);
726    
727     kvm->arch.epoch = gtod->tod - htod.tod;
728     - kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
729     -
730     - if (kvm->arch.epoch > gtod->tod)
731     - kvm->arch.epdx -= 1;
732     + kvm->arch.epdx = 0;
733     + if (test_kvm_facility(kvm, 139)) {
734     + kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
735     + if (kvm->arch.epoch > gtod->tod)
736     + kvm->arch.epdx -= 1;
737     + }
738    
739     kvm_s390_vcpu_block_all(kvm);
740     kvm_for_each_vcpu(i, vcpu, kvm) {
741     @@ -2976,22 +3003,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
742     mutex_unlock(&kvm->lock);
743     }
744    
745     -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
746     -{
747     - struct kvm_vcpu *vcpu;
748     - int i;
749     -
750     - mutex_lock(&kvm->lock);
751     - preempt_disable();
752     - kvm->arch.epoch = tod - get_tod_clock();
753     - kvm_s390_vcpu_block_all(kvm);
754     - kvm_for_each_vcpu(i, vcpu, kvm)
755     - vcpu->arch.sie_block->epoch = kvm->arch.epoch;
756     - kvm_s390_vcpu_unblock_all(kvm);
757     - preempt_enable();
758     - mutex_unlock(&kvm->lock);
759     -}
760     -
761     /**
762     * kvm_arch_fault_in_page - fault-in guest page if necessary
763     * @vcpu: The corresponding virtual cpu
764     diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
765     index 5e46ba429bcb..efa186f065fb 100644
766     --- a/arch/s390/kvm/kvm-s390.h
767     +++ b/arch/s390/kvm/kvm-s390.h
768     @@ -268,9 +268,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
769     int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
770    
771     /* implemented in kvm-s390.c */
772     -void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
773     - const struct kvm_s390_vm_tod_clock *gtod);
774     -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
775     +void kvm_s390_set_tod_clock(struct kvm *kvm,
776     + const struct kvm_s390_vm_tod_clock *gtod);
777     long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
778     int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
779     int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
780     diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
781     index 0714bfa56da0..23bebdbbf490 100644
782     --- a/arch/s390/kvm/priv.c
783     +++ b/arch/s390/kvm/priv.c
784     @@ -81,9 +81,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
785     /* Handle SCK (SET CLOCK) interception */
786     static int handle_set_clock(struct kvm_vcpu *vcpu)
787     {
788     + struct kvm_s390_vm_tod_clock gtod = { 0 };
789     int rc;
790     u8 ar;
791     - u64 op2, val;
792     + u64 op2;
793    
794     if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
795     return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
796     @@ -91,12 +92,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
797     op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
798     if (op2 & 7) /* Operand must be on a doubleword boundary */
799     return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
800     - rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
801     + rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
802     if (rc)
803     return kvm_s390_inject_prog_cond(vcpu, rc);
804    
805     - VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
806     - kvm_s390_set_tod_clock(vcpu->kvm, val);
807     + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
808     + kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
809    
810     kvm_s390_set_psw_cc(vcpu, 0);
811     return 0;
812     diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
813     index e42b8943cb1a..cd0dba7a2293 100644
814     --- a/arch/x86/include/asm/pgtable.h
815     +++ b/arch/x86/include/asm/pgtable.h
816     @@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
817     {
818     pmdval_t v = native_pmd_val(pmd);
819    
820     - return __pmd(v | set);
821     + return native_make_pmd(v | set);
822     }
823    
824     static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
825     {
826     pmdval_t v = native_pmd_val(pmd);
827    
828     - return __pmd(v & ~clear);
829     + return native_make_pmd(v & ~clear);
830     }
831    
832     static inline pmd_t pmd_mkold(pmd_t pmd)
833     @@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
834     {
835     pudval_t v = native_pud_val(pud);
836    
837     - return __pud(v | set);
838     + return native_make_pud(v | set);
839     }
840    
841     static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
842     {
843     pudval_t v = native_pud_val(pud);
844    
845     - return __pud(v & ~clear);
846     + return native_make_pud(v & ~clear);
847     }
848    
849     static inline pud_t pud_mkold(pud_t pud)
850     diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
851     index e55466760ff8..b3ec519e3982 100644
852     --- a/arch/x86/include/asm/pgtable_32.h
853     +++ b/arch/x86/include/asm/pgtable_32.h
854     @@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
855     static inline void pgtable_cache_init(void) { }
856     static inline void check_pgt_cache(void) { }
857     void paging_init(void);
858     +void sync_initial_page_table(void);
859    
860     /*
861     * Define this if things work differently on an i386 and an i486:
862     diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
863     index 81462e9a34f6..1149d2112b2e 100644
864     --- a/arch/x86/include/asm/pgtable_64.h
865     +++ b/arch/x86/include/asm/pgtable_64.h
866     @@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
867     #define swapper_pg_dir init_top_pgt
868    
869     extern void paging_init(void);
870     +static inline void sync_initial_page_table(void) { }
871    
872     #define pte_ERROR(e) \
873     pr_err("%s:%d: bad pte %p(%016lx)\n", \
874     diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
875     index 3696398a9475..246f15b4e64c 100644
876     --- a/arch/x86/include/asm/pgtable_types.h
877     +++ b/arch/x86/include/asm/pgtable_types.h
878     @@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
879     #else
880     #include <asm-generic/pgtable-nopud.h>
881    
882     +static inline pud_t native_make_pud(pudval_t val)
883     +{
884     + return (pud_t) { .p4d.pgd = native_make_pgd(val) };
885     +}
886     +
887     static inline pudval_t native_pud_val(pud_t pud)
888     {
889     return native_pgd_val(pud.p4d.pgd);
890     @@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
891     #else
892     #include <asm-generic/pgtable-nopmd.h>
893    
894     +static inline pmd_t native_make_pmd(pmdval_t val)
895     +{
896     + return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
897     +}
898     +
899     static inline pmdval_t native_pmd_val(pmd_t pmd)
900     {
901     return native_pgd_val(pmd.pud.p4d.pgd);
902     diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
903     index 68d7ab81c62f..1fbe6b9fff37 100644
904     --- a/arch/x86/kernel/setup.c
905     +++ b/arch/x86/kernel/setup.c
906     @@ -1205,20 +1205,13 @@ void __init setup_arch(char **cmdline_p)
907    
908     kasan_init();
909    
910     -#ifdef CONFIG_X86_32
911     - /* sync back kernel address range */
912     - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
913     - swapper_pg_dir + KERNEL_PGD_BOUNDARY,
914     - KERNEL_PGD_PTRS);
915     -
916     /*
917     - * sync back low identity map too. It is used for example
918     - * in the 32-bit EFI stub.
919     + * Sync back kernel address range.
920     + *
921     + * FIXME: Can the later sync in setup_cpu_entry_areas() replace
922     + * this call?
923     */
924     - clone_pgd_range(initial_page_table,
925     - swapper_pg_dir + KERNEL_PGD_BOUNDARY,
926     - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
927     -#endif
928     + sync_initial_page_table();
929    
930     tboot_probe();
931    
932     diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
933     index 497aa766fab3..ea554f812ee1 100644
934     --- a/arch/x86/kernel/setup_percpu.c
935     +++ b/arch/x86/kernel/setup_percpu.c
936     @@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
937     /* Setup cpu initialized, callin, callout masks */
938     setup_cpu_local_masks();
939    
940     -#ifdef CONFIG_X86_32
941     /*
942     * Sync back kernel address range again. We already did this in
943     * setup_arch(), but percpu data also needs to be available in
944     * the smpboot asm. We can't reliably pick up percpu mappings
945     * using vmalloc_fault(), because exception dispatch needs
946     * percpu data.
947     + *
948     + * FIXME: Can the later sync in setup_cpu_entry_areas() replace
949     + * this call?
950     */
951     - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
952     - swapper_pg_dir + KERNEL_PGD_BOUNDARY,
953     - KERNEL_PGD_PTRS);
954     -
955     - /*
956     - * sync back low identity map too. It is used for example
957     - * in the 32-bit EFI stub.
958     - */
959     - clone_pgd_range(initial_page_table,
960     - swapper_pg_dir + KERNEL_PGD_BOUNDARY,
961     - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
962     -#endif
963     + sync_initial_page_table();
964     }
965     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
966     index e2c1fb8d35ce..dbb8b476b41b 100644
967     --- a/arch/x86/kvm/lapic.c
968     +++ b/arch/x86/kvm/lapic.c
969     @@ -1993,14 +1993,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
970    
971     void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
972     {
973     - struct kvm_lapic *apic;
974     + struct kvm_lapic *apic = vcpu->arch.apic;
975     int i;
976    
977     - apic_debug("%s\n", __func__);
978     + if (!apic)
979     + return;
980    
981     - ASSERT(vcpu);
982     - apic = vcpu->arch.apic;
983     - ASSERT(apic != NULL);
984     + apic_debug("%s\n", __func__);
985    
986     /* Stop the timer in case it's a reset to an active apic */
987     hrtimer_cancel(&apic->lapic_timer.timer);
988     @@ -2156,7 +2155,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
989     */
990     vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
991     static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
992     - kvm_lapic_reset(vcpu, false);
993     kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
994    
995     return 0;
996     @@ -2560,7 +2558,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
997    
998     pe = xchg(&apic->pending_events, 0);
999     if (test_bit(KVM_APIC_INIT, &pe)) {
1000     - kvm_lapic_reset(vcpu, true);
1001     kvm_vcpu_reset(vcpu, true);
1002     if (kvm_vcpu_is_bsp(apic->vcpu))
1003     vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1004     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1005     index cc83bdcb65d1..e080dbe55360 100644
1006     --- a/arch/x86/kvm/mmu.c
1007     +++ b/arch/x86/kvm/mmu.c
1008     @@ -3017,7 +3017,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
1009     return RET_PF_RETRY;
1010     }
1011    
1012     - return -EFAULT;
1013     + return RET_PF_EMULATE;
1014     }
1015    
1016     static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
1017     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1018     index 4e3c79530526..3505afabce5d 100644
1019     --- a/arch/x86/kvm/svm.c
1020     +++ b/arch/x86/kvm/svm.c
1021     @@ -45,6 +45,7 @@
1022     #include <asm/debugreg.h>
1023     #include <asm/kvm_para.h>
1024     #include <asm/irq_remapping.h>
1025     +#include <asm/microcode.h>
1026     #include <asm/nospec-branch.h>
1027    
1028     #include <asm/virtext.h>
1029     @@ -5029,7 +5030,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1030     * being speculatively taken.
1031     */
1032     if (svm->spec_ctrl)
1033     - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
1034     + native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
1035    
1036     asm volatile (
1037     "push %%" _ASM_BP "; \n\t"
1038     @@ -5138,11 +5139,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1039     * If the L02 MSR bitmap does not intercept the MSR, then we need to
1040     * save it.
1041     */
1042     - if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
1043     - rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
1044     + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
1045     + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
1046    
1047     if (svm->spec_ctrl)
1048     - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1049     + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1050    
1051     /* Eliminate branch target predictions from guest mode */
1052     vmexit_fill_RSB();
1053     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1054     index 561d8937fac5..87b453eeae40 100644
1055     --- a/arch/x86/kvm/vmx.c
1056     +++ b/arch/x86/kvm/vmx.c
1057     @@ -51,6 +51,7 @@
1058     #include <asm/apic.h>
1059     #include <asm/irq_remapping.h>
1060     #include <asm/mmu_context.h>
1061     +#include <asm/microcode.h>
1062     #include <asm/nospec-branch.h>
1063    
1064     #include "trace.h"
1065     @@ -9443,7 +9444,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1066     * being speculatively taken.
1067     */
1068     if (vmx->spec_ctrl)
1069     - wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
1070     + native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
1071    
1072     vmx->__launched = vmx->loaded_vmcs->launched;
1073     asm(
1074     @@ -9578,11 +9579,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1075     * If the L02 MSR bitmap does not intercept the MSR, then we need to
1076     * save it.
1077     */
1078     - if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
1079     - rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
1080     + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
1081     + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
1082    
1083     if (vmx->spec_ctrl)
1084     - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1085     + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1086    
1087     /* Eliminate branch target predictions from guest mode */
1088     vmexit_fill_RSB();
1089     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1090     index 17f4eca37d22..a10da5052072 100644
1091     --- a/arch/x86/kvm/x86.c
1092     +++ b/arch/x86/kvm/x86.c
1093     @@ -7835,6 +7835,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1094    
1095     void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1096     {
1097     + kvm_lapic_reset(vcpu, init_event);
1098     +
1099     vcpu->arch.hflags = 0;
1100    
1101     vcpu->arch.smi_pending = 0;
1102     @@ -8279,10 +8281,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
1103     return r;
1104     }
1105    
1106     - if (!size) {
1107     - r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
1108     - WARN_ON(r < 0);
1109     - }
1110     + if (!size)
1111     + vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
1112    
1113     return 0;
1114     }
1115     diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
1116     index b9283cc27622..476d810639a8 100644
1117     --- a/arch/x86/mm/cpu_entry_area.c
1118     +++ b/arch/x86/mm/cpu_entry_area.c
1119     @@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
1120    
1121     for_each_possible_cpu(cpu)
1122     setup_cpu_entry_area(cpu);
1123     +
1124     + /*
1125     + * This is the last essential update to swapper_pgdir which needs
1126     + * to be synchronized to initial_page_table on 32bit.
1127     + */
1128     + sync_initial_page_table();
1129     }
1130     diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
1131     index 135c9a7898c7..3141e67ec24c 100644
1132     --- a/arch/x86/mm/init_32.c
1133     +++ b/arch/x86/mm/init_32.c
1134     @@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
1135     }
1136     #endif /* CONFIG_HIGHMEM */
1137    
1138     +void __init sync_initial_page_table(void)
1139     +{
1140     + clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
1141     + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1142     + KERNEL_PGD_PTRS);
1143     +
1144     + /*
1145     + * sync back low identity map too. It is used for example
1146     + * in the 32-bit EFI stub.
1147     + */
1148     + clone_pgd_range(initial_page_table,
1149     + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
1150     + min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
1151     +}
1152     +
1153     void __init native_pagetable_init(void)
1154     {
1155     unsigned long pfn, va;
1156     diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
1157     index 86676cec99a1..09dd7f3cf621 100644
1158     --- a/arch/x86/platform/intel-mid/intel-mid.c
1159     +++ b/arch/x86/platform/intel-mid/intel-mid.c
1160     @@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
1161    
1162     static void intel_mid_reboot(void)
1163     {
1164     - intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
1165     + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
1166     }
1167    
1168     static unsigned long __init intel_mid_calibrate_tsc(void)
1169     diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1170     index d9f96cc5d743..1d83152c761b 100644
1171     --- a/arch/x86/xen/suspend.c
1172     +++ b/arch/x86/xen/suspend.c
1173     @@ -1,12 +1,15 @@
1174     // SPDX-License-Identifier: GPL-2.0
1175     #include <linux/types.h>
1176     #include <linux/tick.h>
1177     +#include <linux/percpu-defs.h>
1178    
1179     #include <xen/xen.h>
1180     #include <xen/interface/xen.h>
1181     #include <xen/grant_table.h>
1182     #include <xen/events.h>
1183    
1184     +#include <asm/cpufeatures.h>
1185     +#include <asm/msr-index.h>
1186     #include <asm/xen/hypercall.h>
1187     #include <asm/xen/page.h>
1188     #include <asm/fixmap.h>
1189     @@ -15,6 +18,8 @@
1190     #include "mmu.h"
1191     #include "pmu.h"
1192    
1193     +static DEFINE_PER_CPU(u64, spec_ctrl);
1194     +
1195     void xen_arch_pre_suspend(void)
1196     {
1197     xen_save_time_memory_area();
1198     @@ -35,6 +40,9 @@ void xen_arch_post_suspend(int cancelled)
1199    
1200     static void xen_vcpu_notify_restore(void *data)
1201     {
1202     + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
1203     + wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
1204     +
1205     /* Boot processor notified via generic timekeeping_resume() */
1206     if (smp_processor_id() == 0)
1207     return;
1208     @@ -44,7 +52,15 @@ static void xen_vcpu_notify_restore(void *data)
1209    
1210     static void xen_vcpu_notify_suspend(void *data)
1211     {
1212     + u64 tmp;
1213     +
1214     tick_suspend_local();
1215     +
1216     + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
1217     + rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
1218     + this_cpu_write(spec_ctrl, tmp);
1219     + wrmsrl(MSR_IA32_SPEC_CTRL, 0);
1220     + }
1221     }
1222    
1223     void xen_arch_resume(void)
1224     diff --git a/block/blk-core.c b/block/blk-core.c
1225     index 82b92adf3477..b725d9e340c2 100644
1226     --- a/block/blk-core.c
1227     +++ b/block/blk-core.c
1228     @@ -2401,7 +2401,7 @@ blk_qc_t submit_bio(struct bio *bio)
1229     unsigned int count;
1230    
1231     if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1232     - count = queue_logical_block_size(bio->bi_disk->queue);
1233     + count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1234     else
1235     count = bio_sectors(bio);
1236    
1237     diff --git a/block/blk-mq.c b/block/blk-mq.c
1238     index 3d3797327491..5629f18b51bd 100644
1239     --- a/block/blk-mq.c
1240     +++ b/block/blk-mq.c
1241     @@ -655,7 +655,6 @@ static void __blk_mq_requeue_request(struct request *rq)
1242    
1243     trace_block_rq_requeue(q, rq);
1244     wbt_requeue(q->rq_wb, &rq->issue_stat);
1245     - blk_mq_sched_requeue_request(rq);
1246    
1247     if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
1248     if (q->dma_drain_size && blk_rq_bytes(rq))
1249     @@ -667,6 +666,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1250     {
1251     __blk_mq_requeue_request(rq);
1252    
1253     + /* this request will be re-inserted to io scheduler queue */
1254     + blk_mq_sched_requeue_request(rq);
1255     +
1256     BUG_ON(blk_queued_rq(rq));
1257     blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
1258     }
1259     diff --git a/block/ioctl.c b/block/ioctl.c
1260     index 1668506d8ed8..3884d810efd2 100644
1261     --- a/block/ioctl.c
1262     +++ b/block/ioctl.c
1263     @@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
1264    
1265     if (start + len > i_size_read(bdev->bd_inode))
1266     return -EINVAL;
1267     - truncate_inode_pages_range(mapping, start, start + len);
1268     + truncate_inode_pages_range(mapping, start, start + len - 1);
1269     return blkdev_issue_discard(bdev, start >> 9, len >> 9,
1270     GFP_KERNEL, flags);
1271     }
1272     diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
1273     index f95c60774ce8..0d6d25e32e1f 100644
1274     --- a/block/kyber-iosched.c
1275     +++ b/block/kyber-iosched.c
1276     @@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = {
1277     .limit_depth = kyber_limit_depth,
1278     .prepare_request = kyber_prepare_request,
1279     .finish_request = kyber_finish_request,
1280     + .requeue_request = kyber_finish_request,
1281     .completed_request = kyber_completed_request,
1282     .dispatch_request = kyber_dispatch_request,
1283     .has_work = kyber_has_work,
1284     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
1285     index 4d0979e02a28..b6d58cc58f5f 100644
1286     --- a/drivers/acpi/bus.c
1287     +++ b/drivers/acpi/bus.c
1288     @@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
1289     return 0;
1290     }
1291     #endif
1292     +static int set_gbl_term_list(const struct dmi_system_id *id)
1293     +{
1294     + acpi_gbl_parse_table_as_term_list = 1;
1295     + return 0;
1296     +}
1297    
1298     -static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1299     +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
1300     + /*
1301     + * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
1302     + * mode.
1303     + * https://bugzilla.kernel.org/show_bug.cgi?id=198515
1304     + */
1305     + {
1306     + .callback = set_gbl_term_list,
1307     + .ident = "Dell Precision M5530",
1308     + .matches = {
1309     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1310     + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
1311     + },
1312     + },
1313     + {
1314     + .callback = set_gbl_term_list,
1315     + .ident = "Dell XPS 15 9570",
1316     + .matches = {
1317     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1318     + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
1319     + },
1320     + },
1321     /*
1322     * Invoke DSDT corruption work-around on all Toshiba Satellite.
1323     + * DSDT will be copied to memory.
1324     * https://bugzilla.kernel.org/show_bug.cgi?id=14679
1325     */
1326     {
1327     @@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1328     {}
1329     };
1330     #else
1331     -static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1332     +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
1333     {}
1334     };
1335     #endif
1336     @@ -1001,11 +1028,8 @@ void __init acpi_early_init(void)
1337    
1338     acpi_permanent_mmap = true;
1339    
1340     - /*
1341     - * If the machine falls into the DMI check table,
1342     - * DSDT will be copied to memory
1343     - */
1344     - dmi_check_system(dsdt_dmi_table);
1345     + /* Check machine-specific quirks */
1346     + dmi_check_system(acpi_quirks_dmi_table);
1347    
1348     status = acpi_reallocate_root_table();
1349     if (ACPI_FAILURE(status)) {
1350     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1351     index 76980e78ae56..e71e54c478da 100644
1352     --- a/drivers/bluetooth/btusb.c
1353     +++ b/drivers/bluetooth/btusb.c
1354     @@ -21,6 +21,7 @@
1355     *
1356     */
1357    
1358     +#include <linux/dmi.h>
1359     #include <linux/module.h>
1360     #include <linux/usb.h>
1361     #include <linux/usb/quirks.h>
1362     @@ -376,6 +377,21 @@ static const struct usb_device_id blacklist_table[] = {
1363     { } /* Terminating entry */
1364     };
1365    
1366     +/* The Bluetooth USB module build into some devices needs to be reset on resume,
1367     + * this is a problem with the platform (likely shutting off all power) not with
1368     + * the module itself. So we use a DMI list to match known broken platforms.
1369     + */
1370     +static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
1371     + {
1372     + /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
1373     + .matches = {
1374     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1375     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
1376     + },
1377     + },
1378     + {}
1379     +};
1380     +
1381     #define BTUSB_MAX_ISOC_FRAMES 10
1382    
1383     #define BTUSB_INTR_RUNNING 0
1384     @@ -3031,6 +3047,9 @@ static int btusb_probe(struct usb_interface *intf,
1385     hdev->send = btusb_send_frame;
1386     hdev->notify = btusb_notify;
1387    
1388     + if (dmi_check_system(btusb_needs_reset_resume_table))
1389     + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
1390     +
1391     #ifdef CONFIG_PM
1392     err = btusb_config_oob_wake(hdev);
1393     if (err)
1394     @@ -3117,12 +3136,6 @@ static int btusb_probe(struct usb_interface *intf,
1395     if (id->driver_info & BTUSB_QCA_ROME) {
1396     data->setup_on_usb = btusb_setup_qca;
1397     hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
1398     -
1399     - /* QCA Rome devices lose their updated firmware over suspend,
1400     - * but the USB hub doesn't notice any status change.
1401     - * explicitly request a device reset on resume.
1402     - */
1403     - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
1404     }
1405    
1406     #ifdef CONFIG_BT_HCIBTUSB_RTL
1407     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1408     index 71fad747c0c7..7499b0cd8326 100644
1409     --- a/drivers/char/ipmi/ipmi_si_intf.c
1410     +++ b/drivers/char/ipmi/ipmi_si_intf.c
1411     @@ -2045,6 +2045,7 @@ static int try_smi_init(struct smi_info *new_smi)
1412     int rv = 0;
1413     int i;
1414     char *init_name = NULL;
1415     + bool platform_device_registered = false;
1416    
1417     pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
1418     ipmi_addr_src_to_str(new_smi->io.addr_source),
1419     @@ -2173,6 +2174,7 @@ static int try_smi_init(struct smi_info *new_smi)
1420     rv);
1421     goto out_err;
1422     }
1423     + platform_device_registered = true;
1424     }
1425    
1426     dev_set_drvdata(new_smi->io.dev, new_smi);
1427     @@ -2279,10 +2281,11 @@ static int try_smi_init(struct smi_info *new_smi)
1428     }
1429    
1430     if (new_smi->pdev) {
1431     - platform_device_unregister(new_smi->pdev);
1432     + if (platform_device_registered)
1433     + platform_device_unregister(new_smi->pdev);
1434     + else
1435     + platform_device_put(new_smi->pdev);
1436     new_smi->pdev = NULL;
1437     - } else if (new_smi->pdev) {
1438     - platform_device_put(new_smi->pdev);
1439     }
1440    
1441     kfree(init_name);
1442     diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
1443     index 4d1dc8b46877..f95b9c75175b 100644
1444     --- a/drivers/char/tpm/st33zp24/st33zp24.c
1445     +++ b/drivers/char/tpm/st33zp24/st33zp24.c
1446     @@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
1447     size_t count)
1448     {
1449     int size = 0;
1450     - int expected;
1451     + u32 expected;
1452    
1453     if (!chip)
1454     return -EBUSY;
1455     @@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
1456     }
1457    
1458     expected = be32_to_cpu(*(__be32 *)(buf + 2));
1459     - if (expected > count) {
1460     + if (expected > count || expected < TPM_HEADER_SIZE) {
1461     size = -EIO;
1462     goto out;
1463     }
1464     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1465     index 1d6729be4cd6..3cec403a80b3 100644
1466     --- a/drivers/char/tpm/tpm-interface.c
1467     +++ b/drivers/char/tpm/tpm-interface.c
1468     @@ -1228,6 +1228,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1469     break;
1470    
1471     recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
1472     + if (recd > num_bytes) {
1473     + total = -EFAULT;
1474     + break;
1475     + }
1476    
1477     rlength = be32_to_cpu(tpm_cmd.header.out.length);
1478     if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
1479     diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1480     index f40d20671a78..f6be08483ae6 100644
1481     --- a/drivers/char/tpm/tpm2-cmd.c
1482     +++ b/drivers/char/tpm/tpm2-cmd.c
1483     @@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
1484     if (!rc) {
1485     data_len = be16_to_cpup(
1486     (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
1487     + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
1488     + rc = -EFAULT;
1489     + goto out;
1490     + }
1491    
1492     rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
1493     ->header.out.length);
1494     diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1495     index 79d6bbb58e39..d5b44cadac56 100644
1496     --- a/drivers/char/tpm/tpm_i2c_infineon.c
1497     +++ b/drivers/char/tpm/tpm_i2c_infineon.c
1498     @@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
1499     static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1500     {
1501     int size = 0;
1502     - int expected, status;
1503     + int status;
1504     + u32 expected;
1505    
1506     if (count < TPM_HEADER_SIZE) {
1507     size = -EIO;
1508     @@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1509     }
1510    
1511     expected = be32_to_cpu(*(__be32 *)(buf + 2));
1512     - if ((size_t) expected > count) {
1513     + if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
1514     size = -EIO;
1515     goto out;
1516     }
1517     diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1518     index c6428771841f..caa86b19c76d 100644
1519     --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1520     +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1521     @@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1522     struct device *dev = chip->dev.parent;
1523     struct i2c_client *client = to_i2c_client(dev);
1524     s32 rc;
1525     - int expected, status, burst_count, retries, size = 0;
1526     + int status;
1527     + int burst_count;
1528     + int retries;
1529     + int size = 0;
1530     + u32 expected;
1531    
1532     if (count < TPM_HEADER_SIZE) {
1533     i2c_nuvoton_ready(chip); /* return to idle */
1534     @@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1535     * to machine native
1536     */
1537     expected = be32_to_cpu(*(__be32 *) (buf + 2));
1538     - if (expected > count) {
1539     + if (expected > count || expected < size) {
1540     dev_err(dev, "%s() expected > count\n", __func__);
1541     size = -EIO;
1542     continue;
1543     diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1544     index fdde971bc810..7561922bc8f8 100644
1545     --- a/drivers/char/tpm/tpm_tis_core.c
1546     +++ b/drivers/char/tpm/tpm_tis_core.c
1547     @@ -202,7 +202,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1548     {
1549     struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1550     int size = 0;
1551     - int expected, status;
1552     + int status;
1553     + u32 expected;
1554    
1555     if (count < TPM_HEADER_SIZE) {
1556     size = -EIO;
1557     @@ -217,7 +218,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1558     }
1559    
1560     expected = be32_to_cpu(*(__be32 *) (buf + 2));
1561     - if (expected > count) {
1562     + if (expected > count || expected < TPM_HEADER_SIZE) {
1563     size = -EIO;
1564     goto out;
1565     }
1566     diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
1567     index 7b596fa38ad2..6bebc1f9f55a 100644
1568     --- a/drivers/cpufreq/s3c24xx-cpufreq.c
1569     +++ b/drivers/cpufreq/s3c24xx-cpufreq.c
1570     @@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
1571     static int s3c_cpufreq_init(struct cpufreq_policy *policy)
1572     {
1573     policy->clk = clk_arm;
1574     - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
1575     +
1576     + policy->cpuinfo.transition_latency = cpu_cur.info->latency;
1577     +
1578     + if (ftab)
1579     + return cpufreq_table_validate_and_show(policy, ftab);
1580     +
1581     + return 0;
1582     }
1583    
1584     static int __init s3c_cpufreq_initclks(void)
1585     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1586     index f34430f99fd8..872100215ca0 100644
1587     --- a/drivers/edac/sb_edac.c
1588     +++ b/drivers/edac/sb_edac.c
1589     @@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
1590     * sbridge structs
1591     */
1592    
1593     -#define NUM_CHANNELS 4 /* Max channels per MC */
1594     +#define NUM_CHANNELS 6 /* Max channels per MC */
1595     #define MAX_DIMMS 3 /* Max DIMMS per channel */
1596     #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
1597     #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
1598     diff --git a/drivers/md/md.c b/drivers/md/md.c
1599     index 4e4dee0ec2de..926542fbc892 100644
1600     --- a/drivers/md/md.c
1601     +++ b/drivers/md/md.c
1602     @@ -8554,6 +8554,10 @@ static int remove_and_add_spares(struct mddev *mddev,
1603     int removed = 0;
1604     bool remove_some = false;
1605    
1606     + if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
1607     + /* Mustn't remove devices when resync thread is running */
1608     + return 0;
1609     +
1610     rdev_for_each(rdev, mddev) {
1611     if ((this == NULL || rdev == this) &&
1612     rdev->raid_disk >= 0 &&
1613     diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
1614     index 50bce68ffd66..65d157fe76d1 100644
1615     --- a/drivers/media/dvb-frontends/m88ds3103.c
1616     +++ b/drivers/media/dvb-frontends/m88ds3103.c
1617     @@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
1618     * New users must use I2C client binding directly!
1619     */
1620     struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1621     - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
1622     + struct i2c_adapter *i2c,
1623     + struct i2c_adapter **tuner_i2c_adapter)
1624     {
1625     struct i2c_client *client;
1626     struct i2c_board_info board_info;
1627     - struct m88ds3103_platform_data pdata;
1628     + struct m88ds3103_platform_data pdata = {};
1629    
1630     pdata.clk = cfg->clock;
1631     pdata.i2c_wr_max = cfg->i2c_wr_max;
1632     @@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
1633     case M88DS3103_CHIP_ID:
1634     break;
1635     default:
1636     + ret = -ENODEV;
1637     + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
1638     goto err_kfree;
1639     }
1640    
1641     diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
1642     index 35026795be28..fa41d9422d57 100644
1643     --- a/drivers/mmc/host/dw_mmc-exynos.c
1644     +++ b/drivers/mmc/host/dw_mmc-exynos.c
1645     @@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
1646    
1647     static const struct dw_mci_drv_data exynos_drv_data = {
1648     .caps = exynos_dwmmc_caps,
1649     + .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
1650     .init = dw_mci_exynos_priv_init,
1651     .set_ios = dw_mci_exynos_set_ios,
1652     .parse_dt = dw_mci_exynos_parse_dt,
1653     diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
1654     index 73fd75c3c824..89cdb3d533bb 100644
1655     --- a/drivers/mmc/host/dw_mmc-k3.c
1656     +++ b/drivers/mmc/host/dw_mmc-k3.c
1657     @@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
1658     if (priv->ctrl_id < 0)
1659     priv->ctrl_id = 0;
1660    
1661     + if (priv->ctrl_id >= TIMING_MODE)
1662     + return -EINVAL;
1663     +
1664     host->priv = priv;
1665     return 0;
1666     }
1667     @@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
1668    
1669     static const struct dw_mci_drv_data hi6220_data = {
1670     .caps = dw_mci_hi6220_caps,
1671     + .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
1672     .switch_voltage = dw_mci_hi6220_switch_voltage,
1673     .set_ios = dw_mci_hi6220_set_ios,
1674     .parse_dt = dw_mci_hi6220_parse_dt,
1675     diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
1676     index a3f1c2b30145..339295212935 100644
1677     --- a/drivers/mmc/host/dw_mmc-rockchip.c
1678     +++ b/drivers/mmc/host/dw_mmc-rockchip.c
1679     @@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
1680    
1681     static const struct dw_mci_drv_data rk3288_drv_data = {
1682     .caps = dw_mci_rk3288_dwmmc_caps,
1683     + .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
1684     .set_ios = dw_mci_rk3288_set_ios,
1685     .execute_tuning = dw_mci_rk3288_execute_tuning,
1686     .parse_dt = dw_mci_rk3288_parse_dt,
1687     diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
1688     index d38e94ae2b85..c06b5393312f 100644
1689     --- a/drivers/mmc/host/dw_mmc-zx.c
1690     +++ b/drivers/mmc/host/dw_mmc-zx.c
1691     @@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
1692    
1693     static const struct dw_mci_drv_data zx_drv_data = {
1694     .caps = zx_dwmmc_caps,
1695     + .num_caps = ARRAY_SIZE(zx_dwmmc_caps),
1696     .execute_tuning = dw_mci_zx_execute_tuning,
1697     .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
1698     .parse_dt = dw_mci_zx_parse_dt,
1699     diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
1700     index 0aa39975f33b..d9b4acefed31 100644
1701     --- a/drivers/mmc/host/dw_mmc.c
1702     +++ b/drivers/mmc/host/dw_mmc.c
1703     @@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
1704     {
1705     struct dw_mci *host = s->private;
1706    
1707     + pm_runtime_get_sync(host->dev);
1708     +
1709     seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
1710     seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
1711     seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
1712     @@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
1713     seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
1714     seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
1715    
1716     + pm_runtime_put_autosuspend(host->dev);
1717     +
1718     return 0;
1719     }
1720    
1721     @@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1722     return IRQ_HANDLED;
1723     }
1724    
1725     +static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
1726     +{
1727     + struct dw_mci *host = slot->host;
1728     + const struct dw_mci_drv_data *drv_data = host->drv_data;
1729     + struct mmc_host *mmc = slot->mmc;
1730     + int ctrl_id;
1731     +
1732     + if (host->pdata->caps)
1733     + mmc->caps = host->pdata->caps;
1734     +
1735     + /*
1736     + * Support MMC_CAP_ERASE by default.
1737     + * It needs to use trim/discard/erase commands.
1738     + */
1739     + mmc->caps |= MMC_CAP_ERASE;
1740     +
1741     + if (host->pdata->pm_caps)
1742     + mmc->pm_caps = host->pdata->pm_caps;
1743     +
1744     + if (host->dev->of_node) {
1745     + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
1746     + if (ctrl_id < 0)
1747     + ctrl_id = 0;
1748     + } else {
1749     + ctrl_id = to_platform_device(host->dev)->id;
1750     + }
1751     +
1752     + if (drv_data && drv_data->caps) {
1753     + if (ctrl_id >= drv_data->num_caps) {
1754     + dev_err(host->dev, "invalid controller id %d\n",
1755     + ctrl_id);
1756     + return -EINVAL;
1757     + }
1758     + mmc->caps |= drv_data->caps[ctrl_id];
1759     + }
1760     +
1761     + if (host->pdata->caps2)
1762     + mmc->caps2 = host->pdata->caps2;
1763     +
1764     + /* Process SDIO IRQs through the sdio_irq_work. */
1765     + if (mmc->caps & MMC_CAP_SDIO_IRQ)
1766     + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
1767     +
1768     + return 0;
1769     +}
1770     +
1771     static int dw_mci_init_slot(struct dw_mci *host)
1772     {
1773     struct mmc_host *mmc;
1774     struct dw_mci_slot *slot;
1775     - const struct dw_mci_drv_data *drv_data = host->drv_data;
1776     - int ctrl_id, ret;
1777     + int ret;
1778     u32 freq[2];
1779    
1780     mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
1781     @@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
1782     if (!mmc->ocr_avail)
1783     mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1784    
1785     - if (host->pdata->caps)
1786     - mmc->caps = host->pdata->caps;
1787     -
1788     - /*
1789     - * Support MMC_CAP_ERASE by default.
1790     - * It needs to use trim/discard/erase commands.
1791     - */
1792     - mmc->caps |= MMC_CAP_ERASE;
1793     -
1794     - if (host->pdata->pm_caps)
1795     - mmc->pm_caps = host->pdata->pm_caps;
1796     -
1797     - if (host->dev->of_node) {
1798     - ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
1799     - if (ctrl_id < 0)
1800     - ctrl_id = 0;
1801     - } else {
1802     - ctrl_id = to_platform_device(host->dev)->id;
1803     - }
1804     - if (drv_data && drv_data->caps)
1805     - mmc->caps |= drv_data->caps[ctrl_id];
1806     -
1807     - if (host->pdata->caps2)
1808     - mmc->caps2 = host->pdata->caps2;
1809     -
1810     ret = mmc_of_parse(mmc);
1811     if (ret)
1812     goto err_host_allocated;
1813    
1814     - /* Process SDIO IRQs through the sdio_irq_work. */
1815     - if (mmc->caps & MMC_CAP_SDIO_IRQ)
1816     - mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
1817     + ret = dw_mci_init_slot_caps(slot);
1818     + if (ret)
1819     + goto err_host_allocated;
1820    
1821     /* Useful defaults if platform data is unset. */
1822     if (host->use_dma == TRANS_MODE_IDMAC) {
1823     diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
1824     index e3124f06a47e..1424bd490dd1 100644
1825     --- a/drivers/mmc/host/dw_mmc.h
1826     +++ b/drivers/mmc/host/dw_mmc.h
1827     @@ -543,6 +543,7 @@ struct dw_mci_slot {
1828     /**
1829     * dw_mci driver data - dw-mshc implementation specific driver data.
1830     * @caps: mmc subsystem specified capabilities of the controller(s).
1831     + * @num_caps: number of capabilities specified by @caps.
1832     * @init: early implementation specific initialization.
1833     * @set_ios: handle bus specific extensions.
1834     * @parse_dt: parse implementation specific device tree properties.
1835     @@ -554,6 +555,7 @@ struct dw_mci_slot {
1836     */
1837     struct dw_mci_drv_data {
1838     unsigned long *caps;
1839     + u32 num_caps;
1840     int (*init)(struct dw_mci *host);
1841     void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
1842     int (*parse_dt)(struct dw_mci *host);
1843     diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
1844     index 3e4f04fd5175..bf93e8b0b191 100644
1845     --- a/drivers/mmc/host/sdhci-pci-core.c
1846     +++ b/drivers/mmc/host/sdhci-pci-core.c
1847     @@ -593,9 +593,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
1848     slot->chip->rpm_retune = intel_host->d3_retune;
1849     }
1850    
1851     -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1852     +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
1853     +{
1854     + int err = sdhci_execute_tuning(mmc, opcode);
1855     + struct sdhci_host *host = mmc_priv(mmc);
1856     +
1857     + if (err)
1858     + return err;
1859     +
1860     + /*
1861     + * Tuning can leave the IP in an active state (Buffer Read Enable bit
1862     + * set) which prevents the entry to low power states (i.e. S0i3). Data
1863     + * reset will clear it.
1864     + */
1865     + sdhci_reset(host, SDHCI_RESET_DATA);
1866     +
1867     + return 0;
1868     +}
1869     +
1870     +static void byt_probe_slot(struct sdhci_pci_slot *slot)
1871     {
1872     + struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1873     +
1874     byt_read_dsm(slot);
1875     +
1876     + ops->execute_tuning = intel_execute_tuning;
1877     +}
1878     +
1879     +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
1880     +{
1881     + byt_probe_slot(slot);
1882     slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
1883     MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
1884     MMC_CAP_CMD_DURING_TFR |
1885     @@ -650,7 +677,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1886     {
1887     int err;
1888    
1889     - byt_read_dsm(slot);
1890     + byt_probe_slot(slot);
1891    
1892     err = ni_set_max_freq(slot);
1893     if (err)
1894     @@ -663,7 +690,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1895    
1896     static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1897     {
1898     - byt_read_dsm(slot);
1899     + byt_probe_slot(slot);
1900     slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1901     MMC_CAP_WAIT_WHILE_BUSY;
1902     return 0;
1903     @@ -671,7 +698,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1904    
1905     static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1906     {
1907     - byt_read_dsm(slot);
1908     + byt_probe_slot(slot);
1909     slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1910     MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1911     slot->cd_idx = 0;
1912     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1913     index a74a8fbad53a..2e6075ce5dca 100644
1914     --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1915     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
1916     @@ -595,7 +595,7 @@ static void xgbe_isr_task(unsigned long data)
1917    
1918     reissue_mask = 1 << 0;
1919     if (!pdata->per_channel_irq)
1920     - reissue_mask |= 0xffff < 4;
1921     + reissue_mask |= 0xffff << 4;
1922    
1923     XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
1924     }
1925     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1926     index 3e5833cf1fab..eb23f9ba1a9a 100644
1927     --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1928     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
1929     @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
1930     struct net_device *netdev = pdata->netdev;
1931     int ret = 0;
1932    
1933     + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
1934     +
1935     pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
1936     XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
1937    
1938     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1939     index d699bf88d18f..6044fdcf6056 100644
1940     --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1941     +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1942     @@ -156,7 +156,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
1943    
1944     if (is_t6(padap->params.chip)) {
1945     size = padap->params.cim_la_size / 10 + 1;
1946     - size *= 11 * sizeof(u32);
1947     + size *= 10 * sizeof(u32);
1948     } else {
1949     size = padap->params.cim_la_size / 8;
1950     size *= 8 * sizeof(u32);
1951     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
1952     index 29cc625e9833..97465101e0b9 100644
1953     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
1954     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
1955     @@ -97,7 +97,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
1956     case CUDBG_CIM_LA:
1957     if (is_t6(adap->params.chip)) {
1958     len = adap->params.cim_la_size / 10 + 1;
1959     - len *= 11 * sizeof(u32);
1960     + len *= 10 * sizeof(u32);
1961     } else {
1962     len = adap->params.cim_la_size / 8;
1963     len *= 8 * sizeof(u32);
1964     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1965     index 62a18914f00f..a7113e702f58 100644
1966     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1967     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1968     @@ -1878,6 +1878,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1969     ixgbe_rx_pg_size(rx_ring),
1970     DMA_FROM_DEVICE,
1971     IXGBE_RX_DMA_ATTR);
1972     + } else if (ring_uses_build_skb(rx_ring)) {
1973     + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1974     +
1975     + dma_sync_single_range_for_cpu(rx_ring->dev,
1976     + IXGBE_CB(skb)->dma,
1977     + offset,
1978     + skb_headlen(skb),
1979     + DMA_FROM_DEVICE);
1980     } else {
1981     struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1982    
1983     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1984     index d8aefeed124d..0d352d4cf48c 100644
1985     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1986     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1987     @@ -1911,13 +1911,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1988     param->wq.linear = 1;
1989     }
1990    
1991     -static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
1992     +static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
1993     + struct mlx5e_rq_param *param)
1994     {
1995     void *rqc = param->rqc;
1996     void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1997    
1998     MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1999     MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
2000     +
2001     + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2002     }
2003    
2004     static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2005     @@ -2774,6 +2777,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2006     struct mlx5e_cq *cq,
2007     struct mlx5e_cq_param *param)
2008     {
2009     + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2010     + param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
2011     +
2012     return mlx5e_alloc_cq_common(mdev, param, cq);
2013     }
2014    
2015     @@ -2785,7 +2791,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2016     struct mlx5e_cq *cq = &drop_rq->cq;
2017     int err;
2018    
2019     - mlx5e_build_drop_rq_param(&rq_param);
2020     + mlx5e_build_drop_rq_param(mdev, &rq_param);
2021    
2022     err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2023     if (err)
2024     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2025     index 5b499c7a698f..36611b64a91c 100644
2026     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2027     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2028     @@ -36,6 +36,7 @@
2029     #include <linux/tcp.h>
2030     #include <linux/bpf_trace.h>
2031     #include <net/busy_poll.h>
2032     +#include <net/ip6_checksum.h>
2033     #include "en.h"
2034     #include "en_tc.h"
2035     #include "eswitch.h"
2036     @@ -547,20 +548,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
2037     return true;
2038     }
2039    
2040     +static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
2041     +{
2042     + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
2043     + u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
2044     + (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
2045     +
2046     + tcp->check = 0;
2047     + tcp->psh = get_cqe_lro_tcppsh(cqe);
2048     +
2049     + if (tcp_ack) {
2050     + tcp->ack = 1;
2051     + tcp->ack_seq = cqe->lro_ack_seq_num;
2052     + tcp->window = cqe->lro_tcp_win;
2053     + }
2054     +}
2055     +
2056     static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
2057     u32 cqe_bcnt)
2058     {
2059     struct ethhdr *eth = (struct ethhdr *)(skb->data);
2060     struct tcphdr *tcp;
2061     int network_depth = 0;
2062     + __wsum check;
2063     __be16 proto;
2064     u16 tot_len;
2065     void *ip_p;
2066    
2067     - u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
2068     - u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
2069     - (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
2070     -
2071     proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
2072    
2073     tot_len = cqe_bcnt - network_depth;
2074     @@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
2075     ipv4->check = 0;
2076     ipv4->check = ip_fast_csum((unsigned char *)ipv4,
2077     ipv4->ihl);
2078     +
2079     + mlx5e_lro_update_tcp_hdr(cqe, tcp);
2080     + check = csum_partial(tcp, tcp->doff * 4,
2081     + csum_unfold((__force __sum16)cqe->check_sum));
2082     + /* Almost done, don't forget the pseudo header */
2083     + tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
2084     + tot_len - sizeof(struct iphdr),
2085     + IPPROTO_TCP, check);
2086     } else {
2087     + u16 payload_len = tot_len - sizeof(struct ipv6hdr);
2088     struct ipv6hdr *ipv6 = ip_p;
2089    
2090     tcp = ip_p + sizeof(struct ipv6hdr);
2091     skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2092    
2093     ipv6->hop_limit = cqe->lro_min_ttl;
2094     - ipv6->payload_len = cpu_to_be16(tot_len -
2095     - sizeof(struct ipv6hdr));
2096     - }
2097     -
2098     - tcp->psh = get_cqe_lro_tcppsh(cqe);
2099     -
2100     - if (tcp_ack) {
2101     - tcp->ack = 1;
2102     - tcp->ack_seq = cqe->lro_ack_seq_num;
2103     - tcp->window = cqe->lro_tcp_win;
2104     + ipv6->payload_len = cpu_to_be16(payload_len);
2105     +
2106     + mlx5e_lro_update_tcp_hdr(cqe, tcp);
2107     + check = csum_partial(tcp, tcp->doff * 4,
2108     + csum_unfold((__force __sum16)cqe->check_sum));
2109     + /* Almost done, don't forget the pseudo header */
2110     + tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
2111     + IPPROTO_TCP, check);
2112     }
2113     }
2114    
2115     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2116     index 5a4608281f38..707976482c09 100644
2117     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2118     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2119     @@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
2120     if (iph->protocol != IPPROTO_UDP)
2121     goto out;
2122    
2123     - udph = udp_hdr(skb);
2124     + /* Don't assume skb_transport_header() was set */
2125     + udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
2126     if (udph->dest != htons(9))
2127     goto out;
2128    
2129     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2130     index 569b42a01026..11b4f1089d1c 100644
2131     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2132     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2133     @@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
2134     default:
2135     hlen = mlx5e_skb_l2_header_offset(skb);
2136     }
2137     - return min_t(u16, hlen, skb->len);
2138     + return min_t(u16, hlen, skb_headlen(skb));
2139     }
2140    
2141     static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
2142     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2143     index dfaad9ecb2b8..a681693631aa 100644
2144     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2145     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2146     @@ -1755,8 +1755,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2147    
2148     /* Collect all fgs which has a matching match_criteria */
2149     err = build_match_list(&match_head, ft, spec);
2150     - if (err)
2151     + if (err) {
2152     + if (take_write)
2153     + up_write_ref_node(&ft->node);
2154     return ERR_PTR(err);
2155     + }
2156    
2157     if (!take_write)
2158     up_read_ref_node(&ft->node);
2159     @@ -1765,8 +1768,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2160     dest_num, version);
2161     free_match_list(&match_head);
2162     if (!IS_ERR(rule) ||
2163     - (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
2164     + (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
2165     + if (take_write)
2166     + up_write_ref_node(&ft->node);
2167     return rule;
2168     + }
2169    
2170     if (!take_write) {
2171     nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
2172     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2173     index 7042c855a5d6..7e50dbc8282c 100644
2174     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2175     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2176     @@ -737,6 +737,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
2177     u32 tb_id,
2178     struct netlink_ext_ack *extack)
2179     {
2180     + struct mlxsw_sp_mr_table *mr4_table;
2181     + struct mlxsw_sp_fib *fib4;
2182     + struct mlxsw_sp_fib *fib6;
2183     struct mlxsw_sp_vr *vr;
2184     int err;
2185    
2186     @@ -745,29 +748,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
2187     NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
2188     return ERR_PTR(-EBUSY);
2189     }
2190     - vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
2191     - if (IS_ERR(vr->fib4))
2192     - return ERR_CAST(vr->fib4);
2193     - vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
2194     - if (IS_ERR(vr->fib6)) {
2195     - err = PTR_ERR(vr->fib6);
2196     + fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
2197     + if (IS_ERR(fib4))
2198     + return ERR_CAST(fib4);
2199     + fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
2200     + if (IS_ERR(fib6)) {
2201     + err = PTR_ERR(fib6);
2202     goto err_fib6_create;
2203     }
2204     - vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
2205     - MLXSW_SP_L3_PROTO_IPV4);
2206     - if (IS_ERR(vr->mr4_table)) {
2207     - err = PTR_ERR(vr->mr4_table);
2208     + mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
2209     + MLXSW_SP_L3_PROTO_IPV4);
2210     + if (IS_ERR(mr4_table)) {
2211     + err = PTR_ERR(mr4_table);
2212     goto err_mr_table_create;
2213     }
2214     + vr->fib4 = fib4;
2215     + vr->fib6 = fib6;
2216     + vr->mr4_table = mr4_table;
2217     vr->tb_id = tb_id;
2218     return vr;
2219    
2220     err_mr_table_create:
2221     - mlxsw_sp_fib_destroy(vr->fib6);
2222     - vr->fib6 = NULL;
2223     + mlxsw_sp_fib_destroy(fib6);
2224     err_fib6_create:
2225     - mlxsw_sp_fib_destroy(vr->fib4);
2226     - vr->fib4 = NULL;
2227     + mlxsw_sp_fib_destroy(fib4);
2228     return ERR_PTR(err);
2229     }
2230    
2231     @@ -3761,6 +3765,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2232     struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2233     int i;
2234    
2235     + if (!list_is_singular(&nh_grp->fib_list))
2236     + return;
2237     +
2238     for (i = 0; i < nh_grp->count; i++) {
2239     struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2240    
2241     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2242     index 593ad31be749..161bcdc012f0 100644
2243     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2244     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2245     @@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2246     bool dynamic)
2247     {
2248     char *sfd_pl;
2249     + u8 num_rec;
2250     int err;
2251    
2252     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2253     @@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2254     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2255     mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2256     mac, fid, action, local_port);
2257     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2258     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2259     - kfree(sfd_pl);
2260     + if (err)
2261     + goto out;
2262     +
2263     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2264     + err = -EBUSY;
2265    
2266     +out:
2267     + kfree(sfd_pl);
2268     return err;
2269     }
2270    
2271     @@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2272     bool adding, bool dynamic)
2273     {
2274     char *sfd_pl;
2275     + u8 num_rec;
2276     int err;
2277    
2278     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2279     @@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2280     mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2281     mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
2282     lag_vid, lag_id);
2283     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2284     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2285     - kfree(sfd_pl);
2286     + if (err)
2287     + goto out;
2288     +
2289     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2290     + err = -EBUSY;
2291    
2292     +out:
2293     + kfree(sfd_pl);
2294     return err;
2295     }
2296    
2297     @@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
2298     u16 fid, u16 mid_idx, bool adding)
2299     {
2300     char *sfd_pl;
2301     + u8 num_rec;
2302     int err;
2303    
2304     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2305     @@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
2306     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2307     mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
2308     MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
2309     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2310     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2311     + if (err)
2312     + goto out;
2313     +
2314     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2315     + err = -EBUSY;
2316     +
2317     +out:
2318     kfree(sfd_pl);
2319     return err;
2320     }
2321     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2322     index a73600dceb8b..a1ffc3ed77f9 100644
2323     --- a/drivers/net/ethernet/ti/cpsw.c
2324     +++ b/drivers/net/ethernet/ti/cpsw.c
2325     @@ -1618,6 +1618,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2326     q_idx = q_idx % cpsw->tx_ch_num;
2327    
2328     txch = cpsw->txv[q_idx].ch;
2329     + txq = netdev_get_tx_queue(ndev, q_idx);
2330     ret = cpsw_tx_packet_submit(priv, skb, txch);
2331     if (unlikely(ret != 0)) {
2332     cpsw_err(priv, tx_err, "desc submit failed\n");
2333     @@ -1628,15 +1629,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2334     * tell the kernel to stop sending us tx frames.
2335     */
2336     if (unlikely(!cpdma_check_free_tx_desc(txch))) {
2337     - txq = netdev_get_tx_queue(ndev, q_idx);
2338     netif_tx_stop_queue(txq);
2339     +
2340     + /* Barrier, so that stop_queue visible to other cpus */
2341     + smp_mb__after_atomic();
2342     +
2343     + if (cpdma_check_free_tx_desc(txch))
2344     + netif_tx_wake_queue(txq);
2345     }
2346    
2347     return NETDEV_TX_OK;
2348     fail:
2349     ndev->stats.tx_dropped++;
2350     - txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
2351     netif_tx_stop_queue(txq);
2352     +
2353     + /* Barrier, so that stop_queue visible to other cpus */
2354     + smp_mb__after_atomic();
2355     +
2356     + if (cpdma_check_free_tx_desc(txch))
2357     + netif_tx_wake_queue(txq);
2358     +
2359     return NETDEV_TX_BUSY;
2360     }
2361    
2362     diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2363     index ed10d1fc8f59..39de77a8bb63 100644
2364     --- a/drivers/net/phy/phy.c
2365     +++ b/drivers/net/phy/phy.c
2366     @@ -841,10 +841,10 @@ void phy_start(struct phy_device *phydev)
2367     break;
2368     case PHY_HALTED:
2369     /* if phy was suspended, bring the physical link up again */
2370     - phy_resume(phydev);
2371     + __phy_resume(phydev);
2372    
2373     /* make sure interrupts are re-enabled for the PHY */
2374     - if (phydev->irq != PHY_POLL) {
2375     + if (phy_interrupt_is_valid(phydev)) {
2376     err = phy_enable_interrupts(phydev);
2377     if (err < 0)
2378     break;
2379     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2380     index b15b31ca2618..d312b314825e 100644
2381     --- a/drivers/net/phy/phy_device.c
2382     +++ b/drivers/net/phy/phy_device.c
2383     @@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
2384     if (!mdio_bus_phy_may_suspend(phydev))
2385     goto no_resume;
2386    
2387     - mutex_lock(&phydev->lock);
2388     ret = phy_resume(phydev);
2389     - mutex_unlock(&phydev->lock);
2390     if (ret < 0)
2391     return ret;
2392    
2393     @@ -1028,9 +1026,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
2394     if (err)
2395     goto error;
2396    
2397     - mutex_lock(&phydev->lock);
2398     phy_resume(phydev);
2399     - mutex_unlock(&phydev->lock);
2400     phy_led_triggers_register(phydev);
2401    
2402     return err;
2403     @@ -1156,7 +1152,7 @@ int phy_suspend(struct phy_device *phydev)
2404     }
2405     EXPORT_SYMBOL(phy_suspend);
2406    
2407     -int phy_resume(struct phy_device *phydev)
2408     +int __phy_resume(struct phy_device *phydev)
2409     {
2410     struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
2411     int ret = 0;
2412     @@ -1173,6 +1169,18 @@ int phy_resume(struct phy_device *phydev)
2413    
2414     return ret;
2415     }
2416     +EXPORT_SYMBOL(__phy_resume);
2417     +
2418     +int phy_resume(struct phy_device *phydev)
2419     +{
2420     + int ret;
2421     +
2422     + mutex_lock(&phydev->lock);
2423     + ret = __phy_resume(phydev);
2424     + mutex_unlock(&phydev->lock);
2425     +
2426     + return ret;
2427     +}
2428     EXPORT_SYMBOL(phy_resume);
2429    
2430     int phy_loopback(struct phy_device *phydev, bool enable)
2431     diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
2432     index 264d4af0bf69..9f79f9274c50 100644
2433     --- a/drivers/net/ppp/ppp_generic.c
2434     +++ b/drivers/net/ppp/ppp_generic.c
2435     @@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
2436     goto outl;
2437    
2438     ppp_lock(ppp);
2439     + spin_lock_bh(&pch->downl);
2440     + if (!pch->chan) {
2441     + /* Don't connect unregistered channels */
2442     + spin_unlock_bh(&pch->downl);
2443     + ppp_unlock(ppp);
2444     + ret = -ENOTCONN;
2445     + goto outl;
2446     + }
2447     + spin_unlock_bh(&pch->downl);
2448     if (pch->file.hdrlen > ppp->file.hdrlen)
2449     ppp->file.hdrlen = pch->file.hdrlen;
2450     hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
2451     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2452     index e29cd5c7d39f..f50cf06c9353 100644
2453     --- a/drivers/net/tun.c
2454     +++ b/drivers/net/tun.c
2455     @@ -1471,6 +1471,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2456     else
2457     *skb_xdp = 0;
2458    
2459     + preempt_disable();
2460     rcu_read_lock();
2461     xdp_prog = rcu_dereference(tun->xdp_prog);
2462     if (xdp_prog && !*skb_xdp) {
2463     @@ -1490,9 +1491,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2464     get_page(alloc_frag->page);
2465     alloc_frag->offset += buflen;
2466     err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
2467     + xdp_do_flush_map();
2468     if (err)
2469     goto err_redirect;
2470     rcu_read_unlock();
2471     + preempt_enable();
2472     return NULL;
2473     case XDP_TX:
2474     xdp_xmit = true;
2475     @@ -1514,6 +1517,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2476     skb = build_skb(buf, buflen);
2477     if (!skb) {
2478     rcu_read_unlock();
2479     + preempt_enable();
2480     return ERR_PTR(-ENOMEM);
2481     }
2482    
2483     @@ -1526,10 +1530,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2484     skb->dev = tun->dev;
2485     generic_xdp_tx(skb, xdp_prog);
2486     rcu_read_unlock();
2487     + preempt_enable();
2488     return NULL;
2489     }
2490    
2491     rcu_read_unlock();
2492     + preempt_enable();
2493    
2494     return skb;
2495    
2496     @@ -1537,6 +1543,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2497     put_page(alloc_frag->page);
2498     err_xdp:
2499     rcu_read_unlock();
2500     + preempt_enable();
2501     this_cpu_inc(tun->pcpu_stats->rx_dropped);
2502     return NULL;
2503     }
2504     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2505     index 559b215c0169..5907a8d0e921 100644
2506     --- a/drivers/net/virtio_net.c
2507     +++ b/drivers/net/virtio_net.c
2508     @@ -2040,8 +2040,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2509     }
2510    
2511     /* Make sure NAPI is not using any XDP TX queues for RX. */
2512     - for (i = 0; i < vi->max_queue_pairs; i++)
2513     - napi_disable(&vi->rq[i].napi);
2514     + if (netif_running(dev))
2515     + for (i = 0; i < vi->max_queue_pairs; i++)
2516     + napi_disable(&vi->rq[i].napi);
2517    
2518     netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2519     err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2520     @@ -2060,7 +2061,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2521     }
2522     if (old_prog)
2523     bpf_prog_put(old_prog);
2524     - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2525     + if (netif_running(dev))
2526     + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2527     }
2528    
2529     return 0;
2530     diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
2531     index afeca6bcdade..ab8b3cbbb205 100644
2532     --- a/drivers/net/wan/hdlc_ppp.c
2533     +++ b/drivers/net/wan/hdlc_ppp.c
2534     @@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
2535     ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
2536     0, NULL);
2537     proto->restart_counter--;
2538     - } else
2539     + } else if (netif_carrier_ok(proto->dev))
2540     + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
2541     + 0, NULL);
2542     + else
2543     ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
2544     0, NULL);
2545     break;
2546     diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
2547     index cd4725e7e0b5..c864430b9fcf 100644
2548     --- a/drivers/platform/x86/dell-laptop.c
2549     +++ b/drivers/platform/x86/dell-laptop.c
2550     @@ -78,7 +78,6 @@ static struct platform_driver platform_driver = {
2551     }
2552     };
2553    
2554     -static struct calling_interface_buffer *buffer;
2555     static struct platform_device *platform_device;
2556     static struct backlight_device *dell_backlight_device;
2557     static struct rfkill *wifi_rfkill;
2558     @@ -286,7 +285,8 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
2559     { }
2560     };
2561    
2562     -void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
2563     +static void dell_fill_request(struct calling_interface_buffer *buffer,
2564     + u32 arg0, u32 arg1, u32 arg2, u32 arg3)
2565     {
2566     memset(buffer, 0, sizeof(struct calling_interface_buffer));
2567     buffer->input[0] = arg0;
2568     @@ -295,7 +295,8 @@ void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
2569     buffer->input[3] = arg3;
2570     }
2571    
2572     -int dell_send_request(u16 class, u16 select)
2573     +static int dell_send_request(struct calling_interface_buffer *buffer,
2574     + u16 class, u16 select)
2575     {
2576     int ret;
2577    
2578     @@ -432,21 +433,22 @@ static int dell_rfkill_set(void *data, bool blocked)
2579     int disable = blocked ? 1 : 0;
2580     unsigned long radio = (unsigned long)data;
2581     int hwswitch_bit = (unsigned long)data - 1;
2582     + struct calling_interface_buffer buffer;
2583     int hwswitch;
2584     int status;
2585     int ret;
2586    
2587     - dell_set_arguments(0, 0, 0, 0);
2588     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2589     + dell_fill_request(&buffer, 0, 0, 0, 0);
2590     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2591     if (ret)
2592     return ret;
2593     - status = buffer->output[1];
2594     + status = buffer.output[1];
2595    
2596     - dell_set_arguments(0x2, 0, 0, 0);
2597     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2598     + dell_fill_request(&buffer, 0x2, 0, 0, 0);
2599     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2600     if (ret)
2601     return ret;
2602     - hwswitch = buffer->output[1];
2603     + hwswitch = buffer.output[1];
2604    
2605     /* If the hardware switch controls this radio, and the hardware
2606     switch is disabled, always disable the radio */
2607     @@ -454,8 +456,8 @@ static int dell_rfkill_set(void *data, bool blocked)
2608     (status & BIT(0)) && !(status & BIT(16)))
2609     disable = 1;
2610    
2611     - dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0);
2612     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2613     + dell_fill_request(&buffer, 1 | (radio<<8) | (disable << 16), 0, 0, 0);
2614     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2615     return ret;
2616     }
2617    
2618     @@ -464,9 +466,11 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
2619     {
2620     if (status & BIT(0)) {
2621     /* Has hw-switch, sync sw_state to BIOS */
2622     + struct calling_interface_buffer buffer;
2623     int block = rfkill_blocked(rfkill);
2624     - dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0);
2625     - dell_send_request(CLASS_INFO, SELECT_RFKILL);
2626     + dell_fill_request(&buffer,
2627     + 1 | (radio << 8) | (block << 16), 0, 0, 0);
2628     + dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2629     } else {
2630     /* No hw-switch, sync BIOS state to sw_state */
2631     rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
2632     @@ -483,21 +487,22 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
2633     static void dell_rfkill_query(struct rfkill *rfkill, void *data)
2634     {
2635     int radio = ((unsigned long)data & 0xF);
2636     + struct calling_interface_buffer buffer;
2637     int hwswitch;
2638     int status;
2639     int ret;
2640    
2641     - dell_set_arguments(0, 0, 0, 0);
2642     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2643     - status = buffer->output[1];
2644     + dell_fill_request(&buffer, 0, 0, 0, 0);
2645     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2646     + status = buffer.output[1];
2647    
2648     if (ret != 0 || !(status & BIT(0))) {
2649     return;
2650     }
2651    
2652     - dell_set_arguments(0, 0x2, 0, 0);
2653     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2654     - hwswitch = buffer->output[1];
2655     + dell_fill_request(&buffer, 0, 0x2, 0, 0);
2656     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2657     + hwswitch = buffer.output[1];
2658    
2659     if (ret != 0)
2660     return;
2661     @@ -514,22 +519,23 @@ static struct dentry *dell_laptop_dir;
2662    
2663     static int dell_debugfs_show(struct seq_file *s, void *data)
2664     {
2665     + struct calling_interface_buffer buffer;
2666     int hwswitch_state;
2667     int hwswitch_ret;
2668     int status;
2669     int ret;
2670    
2671     - dell_set_arguments(0, 0, 0, 0);
2672     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2673     + dell_fill_request(&buffer, 0, 0, 0, 0);
2674     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2675     if (ret)
2676     return ret;
2677     - status = buffer->output[1];
2678     + status = buffer.output[1];
2679    
2680     - dell_set_arguments(0, 0x2, 0, 0);
2681     - hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2682     + dell_fill_request(&buffer, 0, 0x2, 0, 0);
2683     + hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2684     if (hwswitch_ret)
2685     return hwswitch_ret;
2686     - hwswitch_state = buffer->output[1];
2687     + hwswitch_state = buffer.output[1];
2688    
2689     seq_printf(s, "return:\t%d\n", ret);
2690     seq_printf(s, "status:\t0x%X\n", status);
2691     @@ -610,22 +616,23 @@ static const struct file_operations dell_debugfs_fops = {
2692    
2693     static void dell_update_rfkill(struct work_struct *ignored)
2694     {
2695     + struct calling_interface_buffer buffer;
2696     int hwswitch = 0;
2697     int status;
2698     int ret;
2699    
2700     - dell_set_arguments(0, 0, 0, 0);
2701     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2702     - status = buffer->output[1];
2703     + dell_fill_request(&buffer, 0, 0, 0, 0);
2704     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2705     + status = buffer.output[1];
2706    
2707     if (ret != 0)
2708     return;
2709    
2710     - dell_set_arguments(0, 0x2, 0, 0);
2711     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2712     + dell_fill_request(&buffer, 0, 0x2, 0, 0);
2713     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2714    
2715     if (ret == 0 && (status & BIT(0)))
2716     - hwswitch = buffer->output[1];
2717     + hwswitch = buffer.output[1];
2718    
2719     if (wifi_rfkill) {
2720     dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
2721     @@ -683,6 +690,7 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
2722    
2723     static int __init dell_setup_rfkill(void)
2724     {
2725     + struct calling_interface_buffer buffer;
2726     int status, ret, whitelisted;
2727     const char *product;
2728    
2729     @@ -698,9 +706,9 @@ static int __init dell_setup_rfkill(void)
2730     if (!force_rfkill && !whitelisted)
2731     return 0;
2732    
2733     - dell_set_arguments(0, 0, 0, 0);
2734     - ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
2735     - status = buffer->output[1];
2736     + dell_fill_request(&buffer, 0, 0, 0, 0);
2737     + ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
2738     + status = buffer.output[1];
2739    
2740     /* dell wireless info smbios call is not supported */
2741     if (ret != 0)
2742     @@ -853,6 +861,7 @@ static void dell_cleanup_rfkill(void)
2743    
2744     static int dell_send_intensity(struct backlight_device *bd)
2745     {
2746     + struct calling_interface_buffer buffer;
2747     struct calling_interface_token *token;
2748     int ret;
2749    
2750     @@ -860,17 +869,21 @@ static int dell_send_intensity(struct backlight_device *bd)
2751     if (!token)
2752     return -ENODEV;
2753    
2754     - dell_set_arguments(token->location, bd->props.brightness, 0, 0);
2755     + dell_fill_request(&buffer,
2756     + token->location, bd->props.brightness, 0, 0);
2757     if (power_supply_is_system_supplied() > 0)
2758     - ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
2759     + ret = dell_send_request(&buffer,
2760     + CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
2761     else
2762     - ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
2763     + ret = dell_send_request(&buffer,
2764     + CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
2765    
2766     return ret;
2767     }
2768    
2769     static int dell_get_intensity(struct backlight_device *bd)
2770     {
2771     + struct calling_interface_buffer buffer;
2772     struct calling_interface_token *token;
2773     int ret;
2774    
2775     @@ -878,14 +891,17 @@ static int dell_get_intensity(struct backlight_device *bd)
2776     if (!token)
2777     return -ENODEV;
2778    
2779     - dell_set_arguments(token->location, 0, 0, 0);
2780     + dell_fill_request(&buffer, token->location, 0, 0, 0);
2781     if (power_supply_is_system_supplied() > 0)
2782     - ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
2783     + ret = dell_send_request(&buffer,
2784     + CLASS_TOKEN_READ, SELECT_TOKEN_AC);
2785     else
2786     - ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
2787     + ret = dell_send_request(&buffer,
2788     + CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
2789    
2790     if (ret == 0)
2791     - ret = buffer->output[1];
2792     + ret = buffer.output[1];
2793     +
2794     return ret;
2795     }
2796    
2797     @@ -1149,31 +1165,33 @@ static DEFINE_MUTEX(kbd_led_mutex);
2798    
2799     static int kbd_get_info(struct kbd_info *info)
2800     {
2801     + struct calling_interface_buffer buffer;
2802     u8 units;
2803     int ret;
2804    
2805     - dell_set_arguments(0, 0, 0, 0);
2806     - ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2807     + dell_fill_request(&buffer, 0, 0, 0, 0);
2808     + ret = dell_send_request(&buffer,
2809     + CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2810     if (ret)
2811     return ret;
2812    
2813     - info->modes = buffer->output[1] & 0xFFFF;
2814     - info->type = (buffer->output[1] >> 24) & 0xFF;
2815     - info->triggers = buffer->output[2] & 0xFF;
2816     - units = (buffer->output[2] >> 8) & 0xFF;
2817     - info->levels = (buffer->output[2] >> 16) & 0xFF;
2818     + info->modes = buffer.output[1] & 0xFFFF;
2819     + info->type = (buffer.output[1] >> 24) & 0xFF;
2820     + info->triggers = buffer.output[2] & 0xFF;
2821     + units = (buffer.output[2] >> 8) & 0xFF;
2822     + info->levels = (buffer.output[2] >> 16) & 0xFF;
2823    
2824     if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
2825     info->levels--;
2826    
2827     if (units & BIT(0))
2828     - info->seconds = (buffer->output[3] >> 0) & 0xFF;
2829     + info->seconds = (buffer.output[3] >> 0) & 0xFF;
2830     if (units & BIT(1))
2831     - info->minutes = (buffer->output[3] >> 8) & 0xFF;
2832     + info->minutes = (buffer.output[3] >> 8) & 0xFF;
2833     if (units & BIT(2))
2834     - info->hours = (buffer->output[3] >> 16) & 0xFF;
2835     + info->hours = (buffer.output[3] >> 16) & 0xFF;
2836     if (units & BIT(3))
2837     - info->days = (buffer->output[3] >> 24) & 0xFF;
2838     + info->days = (buffer.output[3] >> 24) & 0xFF;
2839    
2840     return ret;
2841     }
2842     @@ -1233,31 +1251,34 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
2843    
2844     static int kbd_get_state(struct kbd_state *state)
2845     {
2846     + struct calling_interface_buffer buffer;
2847     int ret;
2848    
2849     - dell_set_arguments(0x1, 0, 0, 0);
2850     - ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2851     + dell_fill_request(&buffer, 0x1, 0, 0, 0);
2852     + ret = dell_send_request(&buffer,
2853     + CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2854     if (ret)
2855     return ret;
2856    
2857     - state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
2858     + state->mode_bit = ffs(buffer.output[1] & 0xFFFF);
2859     if (state->mode_bit != 0)
2860     state->mode_bit--;
2861    
2862     - state->triggers = (buffer->output[1] >> 16) & 0xFF;
2863     - state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
2864     - state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
2865     - state->als_setting = buffer->output[2] & 0xFF;
2866     - state->als_value = (buffer->output[2] >> 8) & 0xFF;
2867     - state->level = (buffer->output[2] >> 16) & 0xFF;
2868     - state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
2869     - state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
2870     + state->triggers = (buffer.output[1] >> 16) & 0xFF;
2871     + state->timeout_value = (buffer.output[1] >> 24) & 0x3F;
2872     + state->timeout_unit = (buffer.output[1] >> 30) & 0x3;
2873     + state->als_setting = buffer.output[2] & 0xFF;
2874     + state->als_value = (buffer.output[2] >> 8) & 0xFF;
2875     + state->level = (buffer.output[2] >> 16) & 0xFF;
2876     + state->timeout_value_ac = (buffer.output[2] >> 24) & 0x3F;
2877     + state->timeout_unit_ac = (buffer.output[2] >> 30) & 0x3;
2878    
2879     return ret;
2880     }
2881    
2882     static int kbd_set_state(struct kbd_state *state)
2883     {
2884     + struct calling_interface_buffer buffer;
2885     int ret;
2886     u32 input1;
2887     u32 input2;
2888     @@ -1270,8 +1291,9 @@ static int kbd_set_state(struct kbd_state *state)
2889     input2 |= (state->level & 0xFF) << 16;
2890     input2 |= (state->timeout_value_ac & 0x3F) << 24;
2891     input2 |= (state->timeout_unit_ac & 0x3) << 30;
2892     - dell_set_arguments(0x2, input1, input2, 0);
2893     - ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2894     + dell_fill_request(&buffer, 0x2, input1, input2, 0);
2895     + ret = dell_send_request(&buffer,
2896     + CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
2897    
2898     return ret;
2899     }
2900     @@ -1298,6 +1320,7 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
2901    
2902     static int kbd_set_token_bit(u8 bit)
2903     {
2904     + struct calling_interface_buffer buffer;
2905     struct calling_interface_token *token;
2906     int ret;
2907    
2908     @@ -1308,14 +1331,15 @@ static int kbd_set_token_bit(u8 bit)
2909     if (!token)
2910     return -EINVAL;
2911    
2912     - dell_set_arguments(token->location, token->value, 0, 0);
2913     - ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
2914     + dell_fill_request(&buffer, token->location, token->value, 0, 0);
2915     + ret = dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
2916    
2917     return ret;
2918     }
2919    
2920     static int kbd_get_token_bit(u8 bit)
2921     {
2922     + struct calling_interface_buffer buffer;
2923     struct calling_interface_token *token;
2924     int ret;
2925     int val;
2926     @@ -1327,9 +1351,9 @@ static int kbd_get_token_bit(u8 bit)
2927     if (!token)
2928     return -EINVAL;
2929    
2930     - dell_set_arguments(token->location, 0, 0, 0);
2931     - ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD);
2932     - val = buffer->output[1];
2933     + dell_fill_request(&buffer, token->location, 0, 0, 0);
2934     + ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD);
2935     + val = buffer.output[1];
2936    
2937     if (ret)
2938     return ret;
2939     @@ -2046,6 +2070,7 @@ static struct notifier_block dell_laptop_notifier = {
2940    
2941     int dell_micmute_led_set(int state)
2942     {
2943     + struct calling_interface_buffer buffer;
2944     struct calling_interface_token *token;
2945    
2946     if (state == 0)
2947     @@ -2058,8 +2083,8 @@ int dell_micmute_led_set(int state)
2948     if (!token)
2949     return -ENODEV;
2950    
2951     - dell_set_arguments(token->location, token->value, 0, 0);
2952     - dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
2953     + dell_fill_request(&buffer, token->location, token->value, 0, 0);
2954     + dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
2955    
2956     return state;
2957     }
2958     @@ -2090,13 +2115,6 @@ static int __init dell_init(void)
2959     if (ret)
2960     goto fail_platform_device2;
2961    
2962     - buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
2963     - if (!buffer) {
2964     - ret = -ENOMEM;
2965     - goto fail_buffer;
2966     - }
2967     -
2968     -
2969     ret = dell_setup_rfkill();
2970    
2971     if (ret) {
2972     @@ -2121,10 +2139,13 @@ static int __init dell_init(void)
2973    
2974     token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
2975     if (token) {
2976     - dell_set_arguments(token->location, 0, 0, 0);
2977     - ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
2978     + struct calling_interface_buffer buffer;
2979     +
2980     + dell_fill_request(&buffer, token->location, 0, 0, 0);
2981     + ret = dell_send_request(&buffer,
2982     + CLASS_TOKEN_READ, SELECT_TOKEN_AC);
2983     if (ret)
2984     - max_intensity = buffer->output[3];
2985     + max_intensity = buffer.output[3];
2986     }
2987    
2988     if (max_intensity) {
2989     @@ -2158,8 +2179,6 @@ static int __init dell_init(void)
2990     fail_get_brightness:
2991     backlight_device_unregister(dell_backlight_device);
2992     fail_backlight:
2993     - kfree(buffer);
2994     -fail_buffer:
2995     dell_cleanup_rfkill();
2996     fail_rfkill:
2997     platform_device_del(platform_device);
2998     @@ -2179,7 +2198,6 @@ static void __exit dell_exit(void)
2999     touchpad_led_exit();
3000     kbd_led_exit();
3001     backlight_device_unregister(dell_backlight_device);
3002     - kfree(buffer);
3003     dell_cleanup_rfkill();
3004     if (platform_device) {
3005     platform_device_unregister(platform_device);
3006     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
3007     index badf42acbf95..185b3cd48b88 100644
3008     --- a/drivers/s390/net/qeth_core.h
3009     +++ b/drivers/s390/net/qeth_core.h
3010     @@ -581,6 +581,11 @@ struct qeth_cmd_buffer {
3011     void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
3012     };
3013    
3014     +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
3015     +{
3016     + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
3017     +}
3018     +
3019     /**
3020     * definition of a qeth channel, used for read and write
3021     */
3022     @@ -836,7 +841,7 @@ struct qeth_trap_id {
3023     */
3024     static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
3025     {
3026     - return PFN_UP(end - 1) - PFN_DOWN(start);
3027     + return PFN_UP(end) - PFN_DOWN(start);
3028     }
3029    
3030     static inline int qeth_get_micros(void)
3031     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
3032     index 3614df68830f..61e9d0bca197 100644
3033     --- a/drivers/s390/net/qeth_core_main.c
3034     +++ b/drivers/s390/net/qeth_core_main.c
3035     @@ -2057,7 +2057,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
3036     unsigned long flags;
3037     struct qeth_reply *reply = NULL;
3038     unsigned long timeout, event_timeout;
3039     - struct qeth_ipa_cmd *cmd;
3040     + struct qeth_ipa_cmd *cmd = NULL;
3041    
3042     QETH_CARD_TEXT(card, 2, "sendctl");
3043    
3044     @@ -2071,22 +2071,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
3045     }
3046     reply->callback = reply_cb;
3047     reply->param = reply_param;
3048     - if (card->state == CARD_STATE_DOWN)
3049     - reply->seqno = QETH_IDX_COMMAND_SEQNO;
3050     - else
3051     - reply->seqno = card->seqno.ipa++;
3052     +
3053     init_waitqueue_head(&reply->wait_q);
3054     - spin_lock_irqsave(&card->lock, flags);
3055     - list_add_tail(&reply->list, &card->cmd_waiter_list);
3056     - spin_unlock_irqrestore(&card->lock, flags);
3057    
3058     while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
3059     - qeth_prepare_control_data(card, len, iob);
3060    
3061     - if (IS_IPA(iob->data))
3062     + if (IS_IPA(iob->data)) {
3063     + cmd = __ipa_cmd(iob);
3064     + cmd->hdr.seqno = card->seqno.ipa++;
3065     + reply->seqno = cmd->hdr.seqno;
3066     event_timeout = QETH_IPA_TIMEOUT;
3067     - else
3068     + } else {
3069     + reply->seqno = QETH_IDX_COMMAND_SEQNO;
3070     event_timeout = QETH_TIMEOUT;
3071     + }
3072     + qeth_prepare_control_data(card, len, iob);
3073     +
3074     + spin_lock_irqsave(&card->lock, flags);
3075     + list_add_tail(&reply->list, &card->cmd_waiter_list);
3076     + spin_unlock_irqrestore(&card->lock, flags);
3077     +
3078     timeout = jiffies + event_timeout;
3079    
3080     QETH_CARD_TEXT(card, 6, "noirqpnd");
3081     @@ -2111,9 +2115,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
3082    
3083     /* we have only one long running ipassist, since we can ensure
3084     process context of this command we can sleep */
3085     - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3086     - if ((cmd->hdr.command == IPA_CMD_SETIP) &&
3087     - (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
3088     + if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
3089     + cmd->hdr.prot_version == QETH_PROT_IPV4) {
3090     if (!wait_event_timeout(reply->wait_q,
3091     atomic_read(&reply->received), event_timeout))
3092     goto time_err;
3093     @@ -2868,7 +2871,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
3094     memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
3095     cmd->hdr.command = command;
3096     cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
3097     - cmd->hdr.seqno = card->seqno.ipa;
3098     + /* cmd->hdr.seqno is set by qeth_send_control_data() */
3099     cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
3100     cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
3101     if (card->options.layer2)
3102     @@ -3833,10 +3836,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3103     int qeth_get_elements_no(struct qeth_card *card,
3104     struct sk_buff *skb, int extra_elems, int data_offset)
3105     {
3106     - int elements = qeth_get_elements_for_range(
3107     - (addr_t)skb->data + data_offset,
3108     - (addr_t)skb->data + skb_headlen(skb)) +
3109     - qeth_get_elements_for_frags(skb);
3110     + addr_t end = (addr_t)skb->data + skb_headlen(skb);
3111     + int elements = qeth_get_elements_for_frags(skb);
3112     + addr_t start = (addr_t)skb->data + data_offset;
3113     +
3114     + if (start != end)
3115     + elements += qeth_get_elements_for_range(start, end);
3116    
3117     if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3118     QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3119     diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
3120     index e5833837b799..8727b9517de8 100644
3121     --- a/drivers/s390/net/qeth_l3.h
3122     +++ b/drivers/s390/net/qeth_l3.h
3123     @@ -40,8 +40,40 @@ struct qeth_ipaddr {
3124     unsigned int pfxlen;
3125     } a6;
3126     } u;
3127     -
3128     };
3129     +
3130     +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
3131     + struct qeth_ipaddr *a2)
3132     +{
3133     + if (a1->proto != a2->proto)
3134     + return false;
3135     + if (a1->proto == QETH_PROT_IPV6)
3136     + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
3137     + return a1->u.a4.addr == a2->u.a4.addr;
3138     +}
3139     +
3140     +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
3141     + struct qeth_ipaddr *a2)
3142     +{
3143     + /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
3144     + * so 'proto' and 'addr' match for sure.
3145     + *
3146     + * For ucast:
3147     + * - 'mac' is always 0.
3148     + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
3149     + * values are required to avoid mixups in takeover eligibility.
3150     + *
3151     + * For mcast,
3152     + * - 'mac' is mapped from the IP, and thus always matches.
3153     + * - 'mask'/'pfxlen' is always 0.
3154     + */
3155     + if (a1->type != a2->type)
3156     + return false;
3157     + if (a1->proto == QETH_PROT_IPV6)
3158     + return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
3159     + return a1->u.a4.mask == a2->u.a4.mask;
3160     +}
3161     +
3162     static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
3163     {
3164     u64 ret = 0;
3165     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
3166     index ef0961e18686..33131c594627 100644
3167     --- a/drivers/s390/net/qeth_l3_main.c
3168     +++ b/drivers/s390/net/qeth_l3_main.c
3169     @@ -150,6 +150,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
3170     return -EINVAL;
3171     }
3172    
3173     +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
3174     + struct qeth_ipaddr *query)
3175     +{
3176     + u64 key = qeth_l3_ipaddr_hash(query);
3177     + struct qeth_ipaddr *addr;
3178     +
3179     + if (query->is_multicast) {
3180     + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
3181     + if (qeth_l3_addr_match_ip(addr, query))
3182     + return addr;
3183     + } else {
3184     + hash_for_each_possible(card->ip_htable, addr, hnode, key)
3185     + if (qeth_l3_addr_match_ip(addr, query))
3186     + return addr;
3187     + }
3188     + return NULL;
3189     +}
3190     +
3191     static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
3192     {
3193     int i, j;
3194     @@ -203,34 +221,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
3195     return rc;
3196     }
3197    
3198     -inline int
3199     -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
3200     -{
3201     - return addr1->proto == addr2->proto &&
3202     - !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
3203     - !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
3204     -}
3205     -
3206     -static struct qeth_ipaddr *
3207     -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3208     -{
3209     - struct qeth_ipaddr *addr;
3210     -
3211     - if (tmp_addr->is_multicast) {
3212     - hash_for_each_possible(card->ip_mc_htable, addr,
3213     - hnode, qeth_l3_ipaddr_hash(tmp_addr))
3214     - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
3215     - return addr;
3216     - } else {
3217     - hash_for_each_possible(card->ip_htable, addr,
3218     - hnode, qeth_l3_ipaddr_hash(tmp_addr))
3219     - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
3220     - return addr;
3221     - }
3222     -
3223     - return NULL;
3224     -}
3225     -
3226     int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3227     {
3228     int rc = 0;
3229     @@ -245,23 +235,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3230     QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
3231     }
3232    
3233     - addr = qeth_l3_ip_from_hash(card, tmp_addr);
3234     - if (!addr)
3235     + addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
3236     + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
3237     return -ENOENT;
3238    
3239     addr->ref_counter--;
3240     - if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
3241     - addr->type == QETH_IP_TYPE_RXIP))
3242     + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
3243     return rc;
3244     if (addr->in_progress)
3245     return -EINPROGRESS;
3246    
3247     - if (!qeth_card_hw_is_reachable(card)) {
3248     - addr->disp_flag = QETH_DISP_ADDR_DELETE;
3249     - return 0;
3250     - }
3251     -
3252     - rc = qeth_l3_deregister_addr_entry(card, addr);
3253     + if (qeth_card_hw_is_reachable(card))
3254     + rc = qeth_l3_deregister_addr_entry(card, addr);
3255    
3256     hash_del(&addr->hnode);
3257     kfree(addr);
3258     @@ -273,6 +258,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3259     {
3260     int rc = 0;
3261     struct qeth_ipaddr *addr;
3262     + char buf[40];
3263    
3264     QETH_CARD_TEXT(card, 4, "addip");
3265    
3266     @@ -283,8 +269,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3267     QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
3268     }
3269    
3270     - addr = qeth_l3_ip_from_hash(card, tmp_addr);
3271     - if (!addr) {
3272     + addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
3273     + if (addr) {
3274     + if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
3275     + return -EADDRINUSE;
3276     + if (qeth_l3_addr_match_all(addr, tmp_addr)) {
3277     + addr->ref_counter++;
3278     + return 0;
3279     + }
3280     + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
3281     + buf);
3282     + dev_warn(&card->gdev->dev,
3283     + "Registering IP address %s failed\n", buf);
3284     + return -EADDRINUSE;
3285     + } else {
3286     addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
3287     if (!addr)
3288     return -ENOMEM;
3289     @@ -324,19 +322,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3290     (rc == IPA_RC_LAN_OFFLINE)) {
3291     addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3292     if (addr->ref_counter < 1) {
3293     - qeth_l3_delete_ip(card, addr);
3294     + qeth_l3_deregister_addr_entry(card, addr);
3295     + hash_del(&addr->hnode);
3296     kfree(addr);
3297     }
3298     } else {
3299     hash_del(&addr->hnode);
3300     kfree(addr);
3301     }
3302     - } else {
3303     - if (addr->type == QETH_IP_TYPE_NORMAL ||
3304     - addr->type == QETH_IP_TYPE_RXIP)
3305     - addr->ref_counter++;
3306     }
3307     -
3308     return rc;
3309     }
3310    
3311     @@ -404,11 +398,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
3312     spin_lock_bh(&card->ip_lock);
3313    
3314     hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
3315     - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
3316     - qeth_l3_deregister_addr_entry(card, addr);
3317     - hash_del(&addr->hnode);
3318     - kfree(addr);
3319     - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
3320     + if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
3321     if (addr->proto == QETH_PROT_IPV4) {
3322     addr->in_progress = 1;
3323     spin_unlock_bh(&card->ip_lock);
3324     @@ -724,12 +714,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
3325     return -ENOMEM;
3326    
3327     spin_lock_bh(&card->ip_lock);
3328     -
3329     - if (qeth_l3_ip_from_hash(card, ipaddr))
3330     - rc = -EEXIST;
3331     - else
3332     - qeth_l3_add_ip(card, ipaddr);
3333     -
3334     + rc = qeth_l3_add_ip(card, ipaddr);
3335     spin_unlock_bh(&card->ip_lock);
3336    
3337     kfree(ipaddr);
3338     @@ -792,12 +777,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
3339     return -ENOMEM;
3340    
3341     spin_lock_bh(&card->ip_lock);
3342     -
3343     - if (qeth_l3_ip_from_hash(card, ipaddr))
3344     - rc = -EEXIST;
3345     - else
3346     - qeth_l3_add_ip(card, ipaddr);
3347     -
3348     + rc = qeth_l3_add_ip(card, ipaddr);
3349     spin_unlock_bh(&card->ip_lock);
3350    
3351     kfree(ipaddr);
3352     @@ -1405,8 +1385,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
3353     memcpy(tmp->mac, buf, sizeof(tmp->mac));
3354     tmp->is_multicast = 1;
3355    
3356     - ipm = qeth_l3_ip_from_hash(card, tmp);
3357     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
3358     if (ipm) {
3359     + /* for mcast, by-IP match means full match */
3360     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3361     } else {
3362     ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3363     @@ -1489,8 +1470,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
3364     sizeof(struct in6_addr));
3365     tmp->is_multicast = 1;
3366    
3367     - ipm = qeth_l3_ip_from_hash(card, tmp);
3368     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
3369     if (ipm) {
3370     + /* for mcast, by-IP match means full match */
3371     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3372     continue;
3373     }
3374     @@ -2629,11 +2611,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
3375     static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
3376     struct sk_buff *skb, int extra_elems)
3377     {
3378     - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
3379     - int elements = qeth_get_elements_for_range(
3380     - tcpdptr,
3381     - (addr_t)skb->data + skb_headlen(skb)) +
3382     - qeth_get_elements_for_frags(skb);
3383     + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
3384     + addr_t end = (addr_t)skb->data + skb_headlen(skb);
3385     + int elements = qeth_get_elements_for_frags(skb);
3386     +
3387     + if (start != end)
3388     + elements += qeth_get_elements_for_range(start, end);
3389    
3390     if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3391     QETH_DBF_MESSAGE(2,
3392     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
3393     index e30e29ae4819..45657e2b1ff7 100644
3394     --- a/drivers/vfio/vfio_iommu_type1.c
3395     +++ b/drivers/vfio/vfio_iommu_type1.c
3396     @@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3397     {
3398     struct page *page[1];
3399     struct vm_area_struct *vma;
3400     + struct vm_area_struct *vmas[1];
3401     int ret;
3402    
3403     if (mm == current->mm) {
3404     - ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
3405     - page);
3406     + ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
3407     + page, vmas);
3408     } else {
3409     unsigned int flags = 0;
3410    
3411     @@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3412    
3413     down_read(&mm->mmap_sem);
3414     ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
3415     - NULL, NULL);
3416     + vmas, NULL);
3417     + /*
3418     + * The lifetime of a vaddr_get_pfn() page pin is
3419     + * userspace-controlled. In the fs-dax case this could
3420     + * lead to indefinite stalls in filesystem operations.
3421     + * Disallow attempts to pin fs-dax pages via this
3422     + * interface.
3423     + */
3424     + if (ret > 0 && vma_is_fsdax(vmas[0])) {
3425     + ret = -EOPNOTSUPP;
3426     + put_page(page[0]);
3427     + }
3428     up_read(&mm->mmap_sem);
3429     }
3430    
3431     diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
3432     index a28bba801264..27baaff96880 100644
3433     --- a/fs/btrfs/sysfs.c
3434     +++ b/fs/btrfs/sysfs.c
3435     @@ -423,7 +423,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
3436     {
3437     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3438    
3439     - return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
3440     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
3441     }
3442    
3443     BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
3444     @@ -433,8 +433,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
3445     {
3446     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3447    
3448     - return snprintf(buf, PAGE_SIZE, "%u\n",
3449     - fs_info->super_copy->sectorsize);
3450     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
3451     }
3452    
3453     BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
3454     @@ -444,8 +443,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
3455     {
3456     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3457    
3458     - return snprintf(buf, PAGE_SIZE, "%u\n",
3459     - fs_info->super_copy->sectorsize);
3460     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
3461     }
3462    
3463     BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
3464     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
3465     index 5a8c2649af2f..10d12b3de001 100644
3466     --- a/fs/btrfs/transaction.c
3467     +++ b/fs/btrfs/transaction.c
3468     @@ -1723,19 +1723,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
3469    
3470     super = fs_info->super_copy;
3471    
3472     + /* update latest btrfs_super_block::chunk_root refs */
3473     root_item = &fs_info->chunk_root->root_item;
3474     - super->chunk_root = root_item->bytenr;
3475     - super->chunk_root_generation = root_item->generation;
3476     - super->chunk_root_level = root_item->level;
3477     + btrfs_set_super_chunk_root(super, root_item->bytenr);
3478     + btrfs_set_super_chunk_root_generation(super, root_item->generation);
3479     + btrfs_set_super_chunk_root_level(super, root_item->level);
3480    
3481     + /* update latest btrfs_super_block::root refs */
3482     root_item = &fs_info->tree_root->root_item;
3483     - super->root = root_item->bytenr;
3484     - super->generation = root_item->generation;
3485     - super->root_level = root_item->level;
3486     + btrfs_set_super_root(super, root_item->bytenr);
3487     + btrfs_set_super_generation(super, root_item->generation);
3488     + btrfs_set_super_root_level(super, root_item->level);
3489     +
3490     if (btrfs_test_opt(fs_info, SPACE_CACHE))
3491     - super->cache_generation = root_item->generation;
3492     + btrfs_set_super_cache_generation(super, root_item->generation);
3493     if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
3494     - super->uuid_tree_generation = root_item->generation;
3495     + btrfs_set_super_uuid_tree_generation(super,
3496     + root_item->generation);
3497     }
3498    
3499     int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
3500     diff --git a/fs/direct-io.c b/fs/direct-io.c
3501     index 3aafb3343a65..b76110e96d62 100644
3502     --- a/fs/direct-io.c
3503     +++ b/fs/direct-io.c
3504     @@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
3505     */
3506     if (dio->is_async && iov_iter_rw(iter) == WRITE) {
3507     retval = 0;
3508     - if ((iocb->ki_filp->f_flags & O_DSYNC) ||
3509     - IS_SYNC(iocb->ki_filp->f_mapping->host))
3510     + if (iocb->ki_flags & IOCB_DSYNC)
3511     retval = dio_set_defer_completion(dio);
3512     else if (!dio->inode->i_sb->s_dio_done_wq) {
3513     /*
3514     diff --git a/include/linux/fs.h b/include/linux/fs.h
3515     index 511fbaabf624..79421287ff5e 100644
3516     --- a/include/linux/fs.h
3517     +++ b/include/linux/fs.h
3518     @@ -3204,7 +3204,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
3519     if (!vma_is_dax(vma))
3520     return false;
3521     inode = file_inode(vma->vm_file);
3522     - if (inode->i_mode == S_IFCHR)
3523     + if (S_ISCHR(inode->i_mode))
3524     return false; /* device-dax */
3525     return true;
3526     }
3527     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
3528     index fbc98e2c8228..132e3f5a2e0d 100644
3529     --- a/include/linux/nospec.h
3530     +++ b/include/linux/nospec.h
3531     @@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
3532     BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
3533     BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
3534     \
3535     - _i &= _mask; \
3536     - _i; \
3537     + (typeof(_i)) (_i & _mask); \
3538     })
3539     #endif /* _LINUX_NOSPEC_H */
3540     diff --git a/include/linux/phy.h b/include/linux/phy.h
3541     index dc82a07cb4fd..123cd703741d 100644
3542     --- a/include/linux/phy.h
3543     +++ b/include/linux/phy.h
3544     @@ -819,6 +819,7 @@ void phy_device_remove(struct phy_device *phydev);
3545     int phy_init_hw(struct phy_device *phydev);
3546     int phy_suspend(struct phy_device *phydev);
3547     int phy_resume(struct phy_device *phydev);
3548     +int __phy_resume(struct phy_device *phydev);
3549     int phy_loopback(struct phy_device *phydev, bool enable);
3550     struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
3551     phy_interface_t interface);
3552     diff --git a/include/net/udplite.h b/include/net/udplite.h
3553     index 81bdbf97319b..9185e45b997f 100644
3554     --- a/include/net/udplite.h
3555     +++ b/include/net/udplite.h
3556     @@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
3557     UDP_SKB_CB(skb)->cscov = cscov;
3558     if (skb->ip_summed == CHECKSUM_COMPLETE)
3559     skb->ip_summed = CHECKSUM_NONE;
3560     + skb->csum_valid = 0;
3561     }
3562    
3563     return 0;
3564     diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
3565     index aa9d2a2b1210..cf8e4df808cf 100644
3566     --- a/kernel/time/hrtimer.c
3567     +++ b/kernel/time/hrtimer.c
3568     @@ -1104,7 +1104,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
3569    
3570     cpu_base = raw_cpu_ptr(&hrtimer_bases);
3571    
3572     - if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
3573     + /*
3574     + * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
3575     + * clock modifications, so they needs to become CLOCK_MONOTONIC to
3576     + * ensure POSIX compliance.
3577     + */
3578     + if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
3579     clock_id = CLOCK_MONOTONIC;
3580    
3581     base = hrtimer_clockid_to_base(clock_id);
3582     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3583     index 0bcf00e3ce48..e9eb29a0edc5 100644
3584     --- a/kernel/time/timer.c
3585     +++ b/kernel/time/timer.c
3586     @@ -1886,6 +1886,12 @@ int timers_dead_cpu(unsigned int cpu)
3587     raw_spin_lock_irq(&new_base->lock);
3588     raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
3589    
3590     + /*
3591     + * The current CPUs base clock might be stale. Update it
3592     + * before moving the timers over.
3593     + */
3594     + forward_timer_base(new_base);
3595     +
3596     BUG_ON(old_base->running_timer);
3597    
3598     for (i = 0; i < WHEEL_SIZE; i++)
3599     diff --git a/lib/vsprintf.c b/lib/vsprintf.c
3600     index 01c3957b2de6..062ac753a101 100644
3601     --- a/lib/vsprintf.c
3602     +++ b/lib/vsprintf.c
3603     @@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
3604     {
3605     const int default_width = 2 * sizeof(void *);
3606    
3607     - if (!ptr && *fmt != 'K') {
3608     + if (!ptr && *fmt != 'K' && *fmt != 'x') {
3609     /*
3610     * Print (null) with the same width as a pointer so it makes
3611     * tabular output look nice.
3612     diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
3613     index 0254c35b2bf0..126a8ea73c96 100644
3614     --- a/net/bridge/br_sysfs_if.c
3615     +++ b/net/bridge/br_sysfs_if.c
3616     @@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
3617     struct brport_attribute *brport_attr = to_brport_attr(attr);
3618     struct net_bridge_port *p = to_brport(kobj);
3619    
3620     + if (!brport_attr->show)
3621     + return -EINVAL;
3622     +
3623     return brport_attr->show(p, buf);
3624     }
3625    
3626     diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
3627     index 51935270c651..9896f4975353 100644
3628     --- a/net/bridge/br_vlan.c
3629     +++ b/net/bridge/br_vlan.c
3630     @@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
3631     masterv = br_vlan_find(vg, vid);
3632     if (WARN_ON(!masterv))
3633     return NULL;
3634     + refcount_set(&masterv->refcnt, 1);
3635     + return masterv;
3636     }
3637     refcount_inc(&masterv->refcnt);
3638    
3639     diff --git a/net/core/dev.c b/net/core/dev.c
3640     index c8c102a3467f..a2a89acd0de8 100644
3641     --- a/net/core/dev.c
3642     +++ b/net/core/dev.c
3643     @@ -2366,8 +2366,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
3644     */
3645     int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3646     {
3647     + bool disabling;
3648     int rc;
3649    
3650     + disabling = txq < dev->real_num_tx_queues;
3651     +
3652     if (txq < 1 || txq > dev->num_tx_queues)
3653     return -EINVAL;
3654    
3655     @@ -2383,15 +2386,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3656     if (dev->num_tc)
3657     netif_setup_tc(dev, txq);
3658    
3659     - if (txq < dev->real_num_tx_queues) {
3660     + dev->real_num_tx_queues = txq;
3661     +
3662     + if (disabling) {
3663     + synchronize_net();
3664     qdisc_reset_all_tx_gt(dev, txq);
3665     #ifdef CONFIG_XPS
3666     netif_reset_xps_queues_gt(dev, txq);
3667     #endif
3668     }
3669     + } else {
3670     + dev->real_num_tx_queues = txq;
3671     }
3672    
3673     - dev->real_num_tx_queues = txq;
3674     return 0;
3675     }
3676     EXPORT_SYMBOL(netif_set_real_num_tx_queues);
3677     diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
3678     index 0a3f88f08727..98fd12721221 100644
3679     --- a/net/core/gen_estimator.c
3680     +++ b/net/core/gen_estimator.c
3681     @@ -66,6 +66,7 @@ struct net_rate_estimator {
3682     static void est_fetch_counters(struct net_rate_estimator *e,
3683     struct gnet_stats_basic_packed *b)
3684     {
3685     + memset(b, 0, sizeof(*b));
3686     if (e->stats_lock)
3687     spin_lock(e->stats_lock);
3688    
3689     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3690     index c586597da20d..7d36a950d961 100644
3691     --- a/net/ipv4/fib_semantics.c
3692     +++ b/net/ipv4/fib_semantics.c
3693     @@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
3694     fi->fib_nh, cfg, extack))
3695     return 1;
3696     }
3697     +#ifdef CONFIG_IP_ROUTE_CLASSID
3698     + if (cfg->fc_flow &&
3699     + cfg->fc_flow != fi->fib_nh->nh_tclassid)
3700     + return 1;
3701     +#endif
3702     if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
3703     (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
3704     return 0;
3705     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3706     index 4e153b23bcec..f746e49dd585 100644
3707     --- a/net/ipv4/route.c
3708     +++ b/net/ipv4/route.c
3709     @@ -128,10 +128,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
3710     static int ip_rt_error_cost __read_mostly = HZ;
3711     static int ip_rt_error_burst __read_mostly = 5 * HZ;
3712     static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
3713     -static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3714     +static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3715     static int ip_rt_min_advmss __read_mostly = 256;
3716    
3717     static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
3718     +
3719     +static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3720     +
3721     /*
3722     * Interface to generic destination cache.
3723     */
3724     @@ -1829,6 +1832,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
3725     return skb_get_hash_raw(skb) >> 1;
3726     memset(&hash_keys, 0, sizeof(hash_keys));
3727     skb_flow_dissect_flow_keys(skb, &keys, flag);
3728     +
3729     + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
3730     hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
3731     hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
3732     hash_keys.ports.src = keys.ports.src;
3733     @@ -2934,7 +2939,8 @@ static struct ctl_table ipv4_route_table[] = {
3734     .data = &ip_rt_min_pmtu,
3735     .maxlen = sizeof(int),
3736     .mode = 0644,
3737     - .proc_handler = proc_dointvec,
3738     + .proc_handler = proc_dointvec_minmax,
3739     + .extra1 = &ip_min_valid_pmtu,
3740     },
3741     {
3742     .procname = "min_adv_mss",
3743     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3744     index 45f750e85714..0228f494b0a5 100644
3745     --- a/net/ipv4/tcp_input.c
3746     +++ b/net/ipv4/tcp_input.c
3747     @@ -1977,11 +1977,6 @@ void tcp_enter_loss(struct sock *sk)
3748     /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
3749     * loss recovery is underway except recurring timeout(s) on
3750     * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
3751     - *
3752     - * In theory F-RTO can be used repeatedly during loss recovery.
3753     - * In practice this interacts badly with broken middle-boxes that
3754     - * falsely raise the receive window, which results in repeated
3755     - * timeouts and stop-and-go behavior.
3756     */
3757     tp->frto = net->ipv4.sysctl_tcp_frto &&
3758     (new_recovery || icsk->icsk_retransmits) &&
3759     @@ -2637,18 +2632,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
3760     tcp_try_undo_loss(sk, false))
3761     return;
3762    
3763     - /* The ACK (s)acks some never-retransmitted data meaning not all
3764     - * the data packets before the timeout were lost. Therefore we
3765     - * undo the congestion window and state. This is essentially
3766     - * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
3767     - * a retransmitted skb is permantly marked, we can apply such an
3768     - * operation even if F-RTO was not used.
3769     - */
3770     - if ((flag & FLAG_ORIG_SACK_ACKED) &&
3771     - tcp_try_undo_loss(sk, tp->undo_marker))
3772     - return;
3773     -
3774     if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
3775     + /* Step 3.b. A timeout is spurious if not all data are
3776     + * lost, i.e., never-retransmitted data are (s)acked.
3777     + */
3778     + if ((flag & FLAG_ORIG_SACK_ACKED) &&
3779     + tcp_try_undo_loss(sk, true))
3780     + return;
3781     +
3782     if (after(tp->snd_nxt, tp->high_seq)) {
3783     if (flag & FLAG_DATA_SACKED || is_dupack)
3784     tp->frto = 0; /* Step 3.a. loss was real */
3785     @@ -3988,6 +3979,7 @@ void tcp_reset(struct sock *sk)
3786     /* This barrier is coupled with smp_rmb() in tcp_poll() */
3787     smp_wmb();
3788    
3789     + tcp_write_queue_purge(sk);
3790     tcp_done(sk);
3791    
3792     if (!sock_flag(sk, SOCK_DEAD))
3793     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3794     index 94e28350f420..3b051b9b3743 100644
3795     --- a/net/ipv4/tcp_ipv4.c
3796     +++ b/net/ipv4/tcp_ipv4.c
3797     @@ -705,7 +705,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
3798     */
3799     if (sk) {
3800     arg.bound_dev_if = sk->sk_bound_dev_if;
3801     - trace_tcp_send_reset(sk, skb);
3802     + if (sk_fullsock(sk))
3803     + trace_tcp_send_reset(sk, skb);
3804     }
3805    
3806     BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
3807     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3808     index a4d214c7b506..580912de16c2 100644
3809     --- a/net/ipv4/tcp_output.c
3810     +++ b/net/ipv4/tcp_output.c
3811     @@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
3812     */
3813     segs = max_t(u32, bytes / mss_now, min_tso_segs);
3814    
3815     - return min_t(u32, segs, sk->sk_gso_max_segs);
3816     + return segs;
3817     }
3818     EXPORT_SYMBOL(tcp_tso_autosize);
3819    
3820     @@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
3821     const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
3822     u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
3823    
3824     - return tso_segs ? :
3825     - tcp_tso_autosize(sk, mss_now,
3826     - sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
3827     + if (!tso_segs)
3828     + tso_segs = tcp_tso_autosize(sk, mss_now,
3829     + sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
3830     + return min_t(u32, tso_segs, sk->sk_gso_max_segs);
3831     }
3832    
3833     /* Returns the portion of skb which can be sent right away */
3834     @@ -2026,6 +2027,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
3835     }
3836     }
3837    
3838     +static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
3839     +{
3840     + struct sk_buff *skb, *next;
3841     +
3842     + skb = tcp_send_head(sk);
3843     + tcp_for_write_queue_from_safe(skb, next, sk) {
3844     + if (len <= skb->len)
3845     + break;
3846     +
3847     + if (unlikely(TCP_SKB_CB(skb)->eor))
3848     + return false;
3849     +
3850     + len -= skb->len;
3851     + }
3852     +
3853     + return true;
3854     +}
3855     +
3856     /* Create a new MTU probe if we are ready.
3857     * MTU probe is regularly attempting to increase the path MTU by
3858     * deliberately sending larger packets. This discovers routing
3859     @@ -2098,6 +2117,9 @@ static int tcp_mtu_probe(struct sock *sk)
3860     return 0;
3861     }
3862    
3863     + if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
3864     + return -1;
3865     +
3866     /* We're allowed to probe. Build it now. */
3867     nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
3868     if (!nskb)
3869     @@ -2133,6 +2155,10 @@ static int tcp_mtu_probe(struct sock *sk)
3870     /* We've eaten all the data from this skb.
3871     * Throw it away. */
3872     TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
3873     + /* If this is the last SKB we copy and eor is set
3874     + * we need to propagate it to the new skb.
3875     + */
3876     + TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
3877     tcp_unlink_write_queue(skb, sk);
3878     sk_wmem_free_skb(sk, skb);
3879     } else {
3880     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3881     index e4ff25c947c5..590f9ed90c1f 100644
3882     --- a/net/ipv4/udp.c
3883     +++ b/net/ipv4/udp.c
3884     @@ -2031,6 +2031,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
3885     err = udplite_checksum_init(skb, uh);
3886     if (err)
3887     return err;
3888     +
3889     + if (UDP_SKB_CB(skb)->partial_cov) {
3890     + skb->csum = inet_compute_pseudo(skb, proto);
3891     + return 0;
3892     + }
3893     }
3894    
3895     /* Note, we are only interested in != 0 or == 0, thus the
3896     diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
3897     index ec43d18b5ff9..547515e8450a 100644
3898     --- a/net/ipv6/ip6_checksum.c
3899     +++ b/net/ipv6/ip6_checksum.c
3900     @@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
3901     err = udplite_checksum_init(skb, uh);
3902     if (err)
3903     return err;
3904     +
3905     + if (UDP_SKB_CB(skb)->partial_cov) {
3906     + skb->csum = ip6_compute_pseudo(skb, proto);
3907     + return 0;
3908     + }
3909     }
3910    
3911     /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
3912     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3913     index 3873d3877135..3a1775a62973 100644
3914     --- a/net/ipv6/sit.c
3915     +++ b/net/ipv6/sit.c
3916     @@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
3917     #ifdef CONFIG_IPV6_SIT_6RD
3918     struct ip_tunnel *t = netdev_priv(dev);
3919    
3920     - if (t->dev == sitn->fb_tunnel_dev) {
3921     + if (dev == sitn->fb_tunnel_dev) {
3922     ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
3923     t->ip6rd.relay_prefix = 0;
3924     t->ip6rd.prefixlen = 16;
3925     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3926     index 7178476b3d2f..6378f6fbc89f 100644
3927     --- a/net/ipv6/tcp_ipv6.c
3928     +++ b/net/ipv6/tcp_ipv6.c
3929     @@ -943,7 +943,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
3930    
3931     if (sk) {
3932     oif = sk->sk_bound_dev_if;
3933     - trace_tcp_send_reset(sk, skb);
3934     + if (sk_fullsock(sk))
3935     + trace_tcp_send_reset(sk, skb);
3936     }
3937    
3938     tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
3939     diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
3940     index 115918ad8eca..861b67c34191 100644
3941     --- a/net/l2tp/l2tp_core.c
3942     +++ b/net/l2tp/l2tp_core.c
3943     @@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
3944    
3945     }
3946    
3947     -/* Lookup the tunnel socket, possibly involving the fs code if the socket is
3948     - * owned by userspace. A struct sock returned from this function must be
3949     - * released using l2tp_tunnel_sock_put once you're done with it.
3950     - */
3951     -static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
3952     -{
3953     - int err = 0;
3954     - struct socket *sock = NULL;
3955     - struct sock *sk = NULL;
3956     -
3957     - if (!tunnel)
3958     - goto out;
3959     -
3960     - if (tunnel->fd >= 0) {
3961     - /* Socket is owned by userspace, who might be in the process
3962     - * of closing it. Look the socket up using the fd to ensure
3963     - * consistency.
3964     - */
3965     - sock = sockfd_lookup(tunnel->fd, &err);
3966     - if (sock)
3967     - sk = sock->sk;
3968     - } else {
3969     - /* Socket is owned by kernelspace */
3970     - sk = tunnel->sock;
3971     - sock_hold(sk);
3972     - }
3973     -
3974     -out:
3975     - return sk;
3976     -}
3977     -
3978     -/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
3979     -static void l2tp_tunnel_sock_put(struct sock *sk)
3980     -{
3981     - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
3982     - if (tunnel) {
3983     - if (tunnel->fd >= 0) {
3984     - /* Socket is owned by userspace */
3985     - sockfd_put(sk->sk_socket);
3986     - }
3987     - sock_put(sk);
3988     - }
3989     - sock_put(sk);
3990     -}
3991     -
3992     /* Session hash list.
3993     * The session_id SHOULD be random according to RFC2661, but several
3994     * L2TP implementations (Cisco and Microsoft) use incrementing
3995     @@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
3996     return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
3997     }
3998    
3999     +void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
4000     +{
4001     + sock_put(tunnel->sock);
4002     + /* the tunnel is freed in the socket destructor */
4003     +}
4004     +EXPORT_SYMBOL(l2tp_tunnel_free);
4005     +
4006     /* Lookup a tunnel. A new reference is held on the returned tunnel. */
4007     struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
4008     {
4009     @@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session,
4010     }
4011    
4012     l2tp_tunnel_inc_refcount(tunnel);
4013     - sock_hold(tunnel->sock);
4014     hlist_add_head_rcu(&session->global_hlist, g_head);
4015    
4016     spin_unlock_bh(&pn->l2tp_session_hlist_lock);
4017     } else {
4018     l2tp_tunnel_inc_refcount(tunnel);
4019     - sock_hold(tunnel->sock);
4020     }
4021    
4022     hlist_add_head(&session->hlist, head);
4023     @@ -975,7 +935,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
4024     {
4025     struct l2tp_tunnel *tunnel;
4026    
4027     - tunnel = l2tp_sock_to_tunnel(sk);
4028     + tunnel = l2tp_tunnel(sk);
4029     if (tunnel == NULL)
4030     goto pass_up;
4031    
4032     @@ -983,13 +943,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
4033     tunnel->name, skb->len);
4034    
4035     if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
4036     - goto pass_up_put;
4037     + goto pass_up;
4038    
4039     - sock_put(sk);
4040     return 0;
4041    
4042     -pass_up_put:
4043     - sock_put(sk);
4044     pass_up:
4045     return 1;
4046     }
4047     @@ -1216,14 +1173,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
4048     static void l2tp_tunnel_destruct(struct sock *sk)
4049     {
4050     struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
4051     - struct l2tp_net *pn;
4052    
4053     if (tunnel == NULL)
4054     goto end;
4055    
4056     l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
4057    
4058     -
4059     /* Disable udp encapsulation */
4060     switch (tunnel->encap) {
4061     case L2TP_ENCAPTYPE_UDP:
4062     @@ -1240,18 +1195,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
4063     sk->sk_destruct = tunnel->old_sk_destruct;
4064     sk->sk_user_data = NULL;
4065    
4066     - /* Remove the tunnel struct from the tunnel list */
4067     - pn = l2tp_pernet(tunnel->l2tp_net);
4068     - spin_lock_bh(&pn->l2tp_tunnel_list_lock);
4069     - list_del_rcu(&tunnel->list);
4070     - spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
4071     -
4072     - tunnel->sock = NULL;
4073     - l2tp_tunnel_dec_refcount(tunnel);
4074     -
4075     /* Call the original destructor */
4076     if (sk->sk_destruct)
4077     (*sk->sk_destruct)(sk);
4078     +
4079     + kfree_rcu(tunnel, rcu);
4080     end:
4081     return;
4082     }
4083     @@ -1312,49 +1260,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
4084     /* Tunnel socket destroy hook for UDP encapsulation */
4085     static void l2tp_udp_encap_destroy(struct sock *sk)
4086     {
4087     - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
4088     - if (tunnel) {
4089     - l2tp_tunnel_closeall(tunnel);
4090     - sock_put(sk);
4091     - }
4092     + struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
4093     +
4094     + if (tunnel)
4095     + l2tp_tunnel_delete(tunnel);
4096     }
4097    
4098     /* Workqueue tunnel deletion function */
4099     static void l2tp_tunnel_del_work(struct work_struct *work)
4100     {
4101     - struct l2tp_tunnel *tunnel = NULL;
4102     - struct socket *sock = NULL;
4103     - struct sock *sk = NULL;
4104     -
4105     - tunnel = container_of(work, struct l2tp_tunnel, del_work);
4106     + struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
4107     + del_work);
4108     + struct sock *sk = tunnel->sock;
4109     + struct socket *sock = sk->sk_socket;
4110     + struct l2tp_net *pn;
4111    
4112     l2tp_tunnel_closeall(tunnel);
4113    
4114     - sk = l2tp_tunnel_sock_lookup(tunnel);
4115     - if (!sk)
4116     - goto out;
4117     -
4118     - sock = sk->sk_socket;
4119     -
4120     - /* If the tunnel socket was created by userspace, then go through the
4121     - * inet layer to shut the socket down, and let userspace close it.
4122     - * Otherwise, if we created the socket directly within the kernel, use
4123     + /* If the tunnel socket was created within the kernel, use
4124     * the sk API to release it here.
4125     - * In either case the tunnel resources are freed in the socket
4126     - * destructor when the tunnel socket goes away.
4127     */
4128     - if (tunnel->fd >= 0) {
4129     - if (sock)
4130     - inet_shutdown(sock, 2);
4131     - } else {
4132     + if (tunnel->fd < 0) {
4133     if (sock) {
4134     kernel_sock_shutdown(sock, SHUT_RDWR);
4135     sock_release(sock);
4136     }
4137     }
4138    
4139     - l2tp_tunnel_sock_put(sk);
4140     -out:
4141     + /* Remove the tunnel struct from the tunnel list */
4142     + pn = l2tp_pernet(tunnel->l2tp_net);
4143     + spin_lock_bh(&pn->l2tp_tunnel_list_lock);
4144     + list_del_rcu(&tunnel->list);
4145     + spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
4146     +
4147     + /* drop initial ref */
4148     + l2tp_tunnel_dec_refcount(tunnel);
4149     +
4150     + /* drop workqueue ref */
4151     l2tp_tunnel_dec_refcount(tunnel);
4152     }
4153    
4154     @@ -1607,13 +1549,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
4155     sk->sk_user_data = tunnel;
4156     }
4157    
4158     + /* Bump the reference count. The tunnel context is deleted
4159     + * only when this drops to zero. A reference is also held on
4160     + * the tunnel socket to ensure that it is not released while
4161     + * the tunnel is extant. Must be done before sk_destruct is
4162     + * set.
4163     + */
4164     + refcount_set(&tunnel->ref_count, 1);
4165     + sock_hold(sk);
4166     + tunnel->sock = sk;
4167     + tunnel->fd = fd;
4168     +
4169     /* Hook on the tunnel socket destructor so that we can cleanup
4170     * if the tunnel socket goes away.
4171     */
4172     tunnel->old_sk_destruct = sk->sk_destruct;
4173     sk->sk_destruct = &l2tp_tunnel_destruct;
4174     - tunnel->sock = sk;
4175     - tunnel->fd = fd;
4176     lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
4177    
4178     sk->sk_allocation = GFP_ATOMIC;
4179     @@ -1623,11 +1574,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
4180    
4181     /* Add tunnel to our list */
4182     INIT_LIST_HEAD(&tunnel->list);
4183     -
4184     - /* Bump the reference count. The tunnel context is deleted
4185     - * only when this drops to zero. Must be done before list insertion
4186     - */
4187     - refcount_set(&tunnel->ref_count, 1);
4188     spin_lock_bh(&pn->l2tp_tunnel_list_lock);
4189     list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
4190     spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
4191     @@ -1668,8 +1614,6 @@ void l2tp_session_free(struct l2tp_session *session)
4192    
4193     if (tunnel) {
4194     BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
4195     - sock_put(tunnel->sock);
4196     - session->tunnel = NULL;
4197     l2tp_tunnel_dec_refcount(tunnel);
4198     }
4199    
4200     diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
4201     index 9534e16965cc..8ecb1d357445 100644
4202     --- a/net/l2tp/l2tp_core.h
4203     +++ b/net/l2tp/l2tp_core.h
4204     @@ -219,27 +219,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
4205     return &session->priv[0];
4206     }
4207    
4208     -static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
4209     -{
4210     - struct l2tp_tunnel *tunnel;
4211     -
4212     - if (sk == NULL)
4213     - return NULL;
4214     -
4215     - sock_hold(sk);
4216     - tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
4217     - if (tunnel == NULL) {
4218     - sock_put(sk);
4219     - goto out;
4220     - }
4221     -
4222     - BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
4223     -
4224     -out:
4225     - return tunnel;
4226     -}
4227     -
4228     struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
4229     +void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
4230    
4231     struct l2tp_session *l2tp_session_get(const struct net *net,
4232     struct l2tp_tunnel *tunnel,
4233     @@ -288,7 +269,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
4234     static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
4235     {
4236     if (refcount_dec_and_test(&tunnel->ref_count))
4237     - kfree_rcu(tunnel, rcu);
4238     + l2tp_tunnel_free(tunnel);
4239     }
4240    
4241     /* Session reference counts. Incremented when code obtains a reference
4242     diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
4243     index ff61124fdf59..3428fba6f2b7 100644
4244     --- a/net/l2tp/l2tp_ip.c
4245     +++ b/net/l2tp/l2tp_ip.c
4246     @@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
4247     static void l2tp_ip_destroy_sock(struct sock *sk)
4248     {
4249     struct sk_buff *skb;
4250     - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
4251     + struct l2tp_tunnel *tunnel = sk->sk_user_data;
4252    
4253     while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
4254     kfree_skb(skb);
4255    
4256     - if (tunnel) {
4257     - l2tp_tunnel_closeall(tunnel);
4258     - sock_put(sk);
4259     - }
4260     -
4261     - sk_refcnt_debug_dec(sk);
4262     + if (tunnel)
4263     + l2tp_tunnel_delete(tunnel);
4264     }
4265    
4266     static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
4267     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
4268     index 192344688c06..6f009eaa5fbe 100644
4269     --- a/net/l2tp/l2tp_ip6.c
4270     +++ b/net/l2tp/l2tp_ip6.c
4271     @@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
4272    
4273     static void l2tp_ip6_destroy_sock(struct sock *sk)
4274     {
4275     - struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
4276     + struct l2tp_tunnel *tunnel = sk->sk_user_data;
4277    
4278     lock_sock(sk);
4279     ip6_flush_pending_frames(sk);
4280     release_sock(sk);
4281    
4282     - if (tunnel) {
4283     - l2tp_tunnel_closeall(tunnel);
4284     - sock_put(sk);
4285     - }
4286     + if (tunnel)
4287     + l2tp_tunnel_delete(tunnel);
4288    
4289     inet6_destroy_sock(sk);
4290     }
4291     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
4292     index b412fc3351dc..5ea718609fe8 100644
4293     --- a/net/l2tp/l2tp_ppp.c
4294     +++ b/net/l2tp/l2tp_ppp.c
4295     @@ -416,20 +416,28 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
4296     * Session (and tunnel control) socket create/destroy.
4297     *****************************************************************************/
4298    
4299     +static void pppol2tp_put_sk(struct rcu_head *head)
4300     +{
4301     + struct pppol2tp_session *ps;
4302     +
4303     + ps = container_of(head, typeof(*ps), rcu);
4304     + sock_put(ps->__sk);
4305     +}
4306     +
4307     /* Called by l2tp_core when a session socket is being closed.
4308     */
4309     static void pppol2tp_session_close(struct l2tp_session *session)
4310     {
4311     - struct sock *sk;
4312     -
4313     - BUG_ON(session->magic != L2TP_SESSION_MAGIC);
4314     + struct pppol2tp_session *ps;
4315    
4316     - sk = pppol2tp_session_get_sock(session);
4317     - if (sk) {
4318     - if (sk->sk_socket)
4319     - inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
4320     - sock_put(sk);
4321     - }
4322     + ps = l2tp_session_priv(session);
4323     + mutex_lock(&ps->sk_lock);
4324     + ps->__sk = rcu_dereference_protected(ps->sk,
4325     + lockdep_is_held(&ps->sk_lock));
4326     + RCU_INIT_POINTER(ps->sk, NULL);
4327     + if (ps->__sk)
4328     + call_rcu(&ps->rcu, pppol2tp_put_sk);
4329     + mutex_unlock(&ps->sk_lock);
4330     }
4331    
4332     /* Really kill the session socket. (Called from sock_put() if
4333     @@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
4334     }
4335     }
4336    
4337     -static void pppol2tp_put_sk(struct rcu_head *head)
4338     -{
4339     - struct pppol2tp_session *ps;
4340     -
4341     - ps = container_of(head, typeof(*ps), rcu);
4342     - sock_put(ps->__sk);
4343     -}
4344     -
4345     /* Called when the PPPoX socket (session) is closed.
4346     */
4347     static int pppol2tp_release(struct socket *sock)
4348     @@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
4349     sock_orphan(sk);
4350     sock->sk = NULL;
4351    
4352     + /* If the socket is associated with a session,
4353     + * l2tp_session_delete will call pppol2tp_session_close which
4354     + * will drop the session's ref on the socket.
4355     + */
4356     session = pppol2tp_sock_to_session(sk);
4357     -
4358     - if (session != NULL) {
4359     - struct pppol2tp_session *ps;
4360     -
4361     + if (session) {
4362     l2tp_session_delete(session);
4363     -
4364     - ps = l2tp_session_priv(session);
4365     - mutex_lock(&ps->sk_lock);
4366     - ps->__sk = rcu_dereference_protected(ps->sk,
4367     - lockdep_is_held(&ps->sk_lock));
4368     - RCU_INIT_POINTER(ps->sk, NULL);
4369     - mutex_unlock(&ps->sk_lock);
4370     - call_rcu(&ps->rcu, pppol2tp_put_sk);
4371     -
4372     - /* Rely on the sock_put() call at the end of the function for
4373     - * dropping the reference held by pppol2tp_sock_to_session().
4374     - * The last reference will be dropped by pppol2tp_put_sk().
4375     - */
4376     + /* drop the ref obtained by pppol2tp_sock_to_session */
4377     + sock_put(sk);
4378     }
4379     +
4380     release_sock(sk);
4381    
4382     /* This will delete the session context via
4383     @@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
4384    
4385     out_no_ppp:
4386     /* This is how we get the session context from the socket. */
4387     + sock_hold(sk);
4388     sk->sk_user_data = session;
4389     rcu_assign_pointer(ps->sk, sk);
4390     mutex_unlock(&ps->sk_lock);
4391     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4392     index 84a4e4c3be4b..ca9c0544c856 100644
4393     --- a/net/netlink/af_netlink.c
4394     +++ b/net/netlink/af_netlink.c
4395     @@ -2275,7 +2275,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
4396     if (cb->start) {
4397     ret = cb->start(cb);
4398     if (ret)
4399     - goto error_unlock;
4400     + goto error_put;
4401     }
4402    
4403     nlk->cb_running = true;
4404     @@ -2295,6 +2295,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
4405     */
4406     return -EINTR;
4407    
4408     +error_put:
4409     + module_put(control->module);
4410     error_unlock:
4411     sock_put(sk);
4412     mutex_unlock(nlk->cb_mutex);
4413     diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
4414     index d444daf1ac04..6f02499ef007 100644
4415     --- a/net/netlink/genetlink.c
4416     +++ b/net/netlink/genetlink.c
4417     @@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
4418     {
4419     struct sk_buff *tmp;
4420     struct net *net, *prev = NULL;
4421     + bool delivered = false;
4422     int err;
4423    
4424     for_each_net_rcu(net) {
4425     @@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
4426     }
4427     err = nlmsg_multicast(prev->genl_sock, tmp,
4428     portid, group, flags);
4429     - if (err)
4430     + if (!err)
4431     + delivered = true;
4432     + else if (err != -ESRCH)
4433     goto error;
4434     }
4435    
4436     prev = net;
4437     }
4438    
4439     - return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
4440     + err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
4441     + if (!err)
4442     + delivered = true;
4443     + else if (err != -ESRCH)
4444     + goto error;
4445     + return delivered ? 0 : -ESRCH;
4446     error:
4447     kfree_skb(skb);
4448     return err;
4449     diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
4450     index 42410e910aff..cf73dc006c3b 100644
4451     --- a/net/rxrpc/output.c
4452     +++ b/net/rxrpc/output.c
4453     @@ -445,7 +445,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
4454     (char *)&opt, sizeof(opt));
4455     if (ret == 0) {
4456     ret = kernel_sendmsg(conn->params.local->socket, &msg,
4457     - iov, 1, iov[0].iov_len);
4458     + iov, 2, len);
4459    
4460     opt = IPV6_PMTUDISC_DO;
4461     kernel_setsockopt(conn->params.local->socket,
4462     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
4463     index e6b853f0ee4f..2e437bbd3358 100644
4464     --- a/net/sched/cls_api.c
4465     +++ b/net/sched/cls_api.c
4466     @@ -1054,13 +1054,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
4467     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
4468     continue;
4469     if (!tcf_chain_dump(chain, q, parent, skb, cb,
4470     - index_start, &index))
4471     + index_start, &index)) {
4472     + err = -EMSGSIZE;
4473     break;
4474     + }
4475     }
4476    
4477     cb->args[0] = index;
4478    
4479     out:
4480     + /* If we did no progress, the error (EMSGSIZE) is real */
4481     + if (skb->len == 0 && err)
4482     + return err;
4483     return skb->len;
4484     }
4485    
4486     diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
4487     index 33294b5b2c6a..425cc341fd41 100644
4488     --- a/net/sched/cls_u32.c
4489     +++ b/net/sched/cls_u32.c
4490     @@ -397,10 +397,12 @@ static int u32_init(struct tcf_proto *tp)
4491     static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
4492     bool free_pf)
4493     {
4494     + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
4495     +
4496     tcf_exts_destroy(&n->exts);
4497     tcf_exts_put_net(&n->exts);
4498     - if (n->ht_down)
4499     - n->ht_down->refcnt--;
4500     + if (ht && --ht->refcnt == 0)
4501     + kfree(ht);
4502     #ifdef CONFIG_CLS_U32_PERF
4503     if (free_pf)
4504     free_percpu(n->pf);
4505     @@ -653,16 +655,15 @@ static void u32_destroy(struct tcf_proto *tp)
4506    
4507     hlist_del(&tp_c->hnode);
4508    
4509     - for (ht = rtnl_dereference(tp_c->hlist);
4510     - ht;
4511     - ht = rtnl_dereference(ht->next)) {
4512     - ht->refcnt--;
4513     - u32_clear_hnode(tp, ht);
4514     - }
4515     -
4516     while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
4517     + u32_clear_hnode(tp, ht);
4518     RCU_INIT_POINTER(tp_c->hlist, ht->next);
4519     - kfree_rcu(ht, rcu);
4520     +
4521     + /* u32_destroy_key() will later free ht for us, if it's
4522     + * still referenced by some knode
4523     + */
4524     + if (--ht->refcnt == 0)
4525     + kfree_rcu(ht, rcu);
4526     }
4527    
4528     idr_destroy(&tp_c->handle_idr);
4529     @@ -928,7 +929,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
4530     if (TC_U32_KEY(n->handle) == 0)
4531     return -EINVAL;
4532    
4533     - if (n->flags != flags)
4534     + if ((n->flags ^ flags) &
4535     + ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW))
4536     return -EINVAL;
4537    
4538     new = u32_init_knode(tp, n);
4539     diff --git a/net/sctp/input.c b/net/sctp/input.c
4540     index 141c9c466ec1..0247cc432e02 100644
4541     --- a/net/sctp/input.c
4542     +++ b/net/sctp/input.c
4543     @@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
4544     rhl_for_each_entry_rcu(transport, tmp, list, node)
4545     if (transport->asoc->ep == t->asoc->ep) {
4546     rcu_read_unlock();
4547     - err = -EEXIST;
4548     - goto out;
4549     + return -EEXIST;
4550     }
4551     rcu_read_unlock();
4552    
4553     err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
4554     &t->node, sctp_hash_params);
4555     -
4556     -out:
4557     if (err)
4558     pr_err_once("insert transport fail, errno %d\n", err);
4559    
4560     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
4561     index 5d4c15bf66d2..e35d4f73d2df 100644
4562     --- a/net/sctp/ipv6.c
4563     +++ b/net/sctp/ipv6.c
4564     @@ -326,8 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
4565     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
4566     bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
4567    
4568     - if (!IS_ERR(bdst) &&
4569     - ipv6_chk_addr(dev_net(bdst->dev),
4570     + if (IS_ERR(bdst))
4571     + continue;
4572     +
4573     + if (ipv6_chk_addr(dev_net(bdst->dev),
4574     &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
4575     if (!IS_ERR_OR_NULL(dst))
4576     dst_release(dst);
4577     @@ -336,8 +338,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
4578     }
4579    
4580     bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
4581     - if (matchlen > bmatchlen)
4582     + if (matchlen > bmatchlen) {
4583     + dst_release(bdst);
4584     continue;
4585     + }
4586    
4587     if (!IS_ERR_OR_NULL(dst))
4588     dst_release(dst);
4589     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
4590     index 6a38c2503649..91813e686c67 100644
4591     --- a/net/sctp/protocol.c
4592     +++ b/net/sctp/protocol.c
4593     @@ -514,22 +514,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
4594     if (IS_ERR(rt))
4595     continue;
4596    
4597     - if (!dst)
4598     - dst = &rt->dst;
4599     -
4600     /* Ensure the src address belongs to the output
4601     * interface.
4602     */
4603     odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
4604     false);
4605     if (!odev || odev->ifindex != fl4->flowi4_oif) {
4606     - if (&rt->dst != dst)
4607     + if (!dst)
4608     + dst = &rt->dst;
4609     + else
4610     dst_release(&rt->dst);
4611     continue;
4612     }
4613    
4614     - if (dst != &rt->dst)
4615     - dst_release(dst);
4616     + dst_release(dst);
4617     dst = &rt->dst;
4618     break;
4619     }
4620     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
4621     index 9bf575f2e8ed..ea4226e382f9 100644
4622     --- a/net/sctp/sm_make_chunk.c
4623     +++ b/net/sctp/sm_make_chunk.c
4624     @@ -1378,9 +1378,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
4625     struct sctp_chunk *retval;
4626     struct sk_buff *skb;
4627     struct sock *sk;
4628     + int chunklen;
4629     +
4630     + chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
4631     + if (chunklen > SCTP_MAX_CHUNK_LEN)
4632     + goto nodata;
4633    
4634     /* No need to allocate LL here, as this is only a chunk. */
4635     - skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
4636     + skb = alloc_skb(chunklen, gfp);
4637     if (!skb)
4638     goto nodata;
4639    
4640     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
4641     index 736719c8314e..3a780337c393 100644
4642     --- a/net/tls/tls_main.c
4643     +++ b/net/tls/tls_main.c
4644     @@ -45,17 +45,27 @@ MODULE_AUTHOR("Mellanox Technologies");
4645     MODULE_DESCRIPTION("Transport Layer Security Support");
4646     MODULE_LICENSE("Dual BSD/GPL");
4647    
4648     +enum {
4649     + TLSV4,
4650     + TLSV6,
4651     + TLS_NUM_PROTS,
4652     +};
4653     +
4654     enum {
4655     TLS_BASE_TX,
4656     TLS_SW_TX,
4657     TLS_NUM_CONFIG,
4658     };
4659    
4660     -static struct proto tls_prots[TLS_NUM_CONFIG];
4661     +static struct proto *saved_tcpv6_prot;
4662     +static DEFINE_MUTEX(tcpv6_prot_mutex);
4663     +static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
4664    
4665     static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
4666     {
4667     - sk->sk_prot = &tls_prots[ctx->tx_conf];
4668     + int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
4669     +
4670     + sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
4671     }
4672    
4673     int wait_on_pending_writer(struct sock *sk, long *timeo)
4674     @@ -450,8 +460,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
4675     return do_tls_setsockopt(sk, optname, optval, optlen);
4676     }
4677    
4678     +static void build_protos(struct proto *prot, struct proto *base)
4679     +{
4680     + prot[TLS_BASE_TX] = *base;
4681     + prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
4682     + prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
4683     + prot[TLS_BASE_TX].close = tls_sk_proto_close;
4684     +
4685     + prot[TLS_SW_TX] = prot[TLS_BASE_TX];
4686     + prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
4687     + prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
4688     +}
4689     +
4690     static int tls_init(struct sock *sk)
4691     {
4692     + int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
4693     struct inet_connection_sock *icsk = inet_csk(sk);
4694     struct tls_context *ctx;
4695     int rc = 0;
4696     @@ -476,6 +499,17 @@ static int tls_init(struct sock *sk)
4697     ctx->getsockopt = sk->sk_prot->getsockopt;
4698     ctx->sk_proto_close = sk->sk_prot->close;
4699    
4700     + /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
4701     + if (ip_ver == TLSV6 &&
4702     + unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
4703     + mutex_lock(&tcpv6_prot_mutex);
4704     + if (likely(sk->sk_prot != saved_tcpv6_prot)) {
4705     + build_protos(tls_prots[TLSV6], sk->sk_prot);
4706     + smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
4707     + }
4708     + mutex_unlock(&tcpv6_prot_mutex);
4709     + }
4710     +
4711     ctx->tx_conf = TLS_BASE_TX;
4712     update_sk_prot(sk, ctx);
4713     out:
4714     @@ -488,21 +522,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
4715     .init = tls_init,
4716     };
4717    
4718     -static void build_protos(struct proto *prot, struct proto *base)
4719     -{
4720     - prot[TLS_BASE_TX] = *base;
4721     - prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
4722     - prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
4723     - prot[TLS_BASE_TX].close = tls_sk_proto_close;
4724     -
4725     - prot[TLS_SW_TX] = prot[TLS_BASE_TX];
4726     - prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
4727     - prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
4728     -}
4729     -
4730     static int __init tls_register(void)
4731     {
4732     - build_protos(tls_prots, &tcp_prot);
4733     + build_protos(tls_prots[TLSV4], &tcp_prot);
4734    
4735     tcp_register_ulp(&tcp_tls_ulp_ops);
4736    
4737     diff --git a/sound/core/control.c b/sound/core/control.c
4738     index 56b3e2d49c82..af7e6165e21e 100644
4739     --- a/sound/core/control.c
4740     +++ b/sound/core/control.c
4741     @@ -888,7 +888,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
4742    
4743     index_offset = snd_ctl_get_ioff(kctl, &control->id);
4744     vd = &kctl->vd[index_offset];
4745     - if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
4746     + if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
4747     return -EPERM;
4748    
4749     snd_ctl_build_ioff(&control->id, kctl, index_offset);
4750     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4751     index c71dcacea807..96143df19b21 100644
4752     --- a/sound/pci/hda/hda_intel.c
4753     +++ b/sound/pci/hda/hda_intel.c
4754     @@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
4755     };
4756     #define param_check_xint param_check_int
4757    
4758     -static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
4759     +static int power_save = -1;
4760     module_param(power_save, xint, 0644);
4761     MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
4762     "(in second, 0 = disable).");
4763     @@ -2186,6 +2186,24 @@ static int azx_probe(struct pci_dev *pci,
4764     return err;
4765     }
4766    
4767     +#ifdef CONFIG_PM
4768     +/* On some boards setting power_save to a non 0 value leads to clicking /
4769     + * popping sounds when ever we enter/leave powersaving mode. Ideally we would
4770     + * figure out how to avoid these sounds, but that is not always feasible.
4771     + * So we keep a list of devices where we disable powersaving as its known
4772     + * to causes problems on these devices.
4773     + */
4774     +static struct snd_pci_quirk power_save_blacklist[] = {
4775     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
4776     + SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
4777     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
4778     + SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
4779     + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
4780     + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
4781     + {}
4782     +};
4783     +#endif /* CONFIG_PM */
4784     +
4785     /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
4786     static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
4787     [AZX_DRIVER_NVIDIA] = 8,
4788     @@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
4789     struct hdac_bus *bus = azx_bus(chip);
4790     struct pci_dev *pci = chip->pci;
4791     int dev = chip->dev_index;
4792     + int val;
4793     int err;
4794    
4795     hda->probe_continued = 1;
4796     @@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
4797    
4798     chip->running = 1;
4799     azx_add_card_list(chip);
4800     - snd_hda_set_power_save(&chip->bus, power_save * 1000);
4801     +
4802     + val = power_save;
4803     +#ifdef CONFIG_PM
4804     + if (val == -1) {
4805     + const struct snd_pci_quirk *q;
4806     +
4807     + val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
4808     + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
4809     + if (q && val) {
4810     + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
4811     + q->subvendor, q->subdevice);
4812     + val = 0;
4813     + }
4814     + }
4815     +#endif /* CONFIG_PM */
4816     + snd_hda_set_power_save(&chip->bus, val * 1000);
4817     if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
4818     pm_runtime_put_autosuspend(&pci->dev);
4819    
4820     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4821     index 4ff1f0ca52fc..8fe38c18e29d 100644
4822     --- a/sound/pci/hda/patch_realtek.c
4823     +++ b/sound/pci/hda/patch_realtek.c
4824     @@ -4875,13 +4875,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
4825    
4826     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4827     spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4828     + snd_hda_apply_pincfgs(codec, pincfgs);
4829     + } else if (action == HDA_FIXUP_ACT_INIT) {
4830     /* Enable DOCK device */
4831     snd_hda_codec_write(codec, 0x17, 0,
4832     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
4833     /* Enable DOCK device */
4834     snd_hda_codec_write(codec, 0x19, 0,
4835     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
4836     - snd_hda_apply_pincfgs(codec, pincfgs);
4837     }
4838     }
4839    
4840     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
4841     index 8a59d4782a0f..69bf5cf1e91e 100644
4842     --- a/sound/usb/quirks-table.h
4843     +++ b/sound/usb/quirks-table.h
4844     @@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
4845     }
4846     },
4847    
4848     +{
4849     + /*
4850     + * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
4851     + * even though it advertises more. The capture interface doesn't work
4852     + * even on windows.
4853     + */
4854     + USB_DEVICE(0x19b5, 0x0021),
4855     + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
4856     + .ifnum = QUIRK_ANY_INTERFACE,
4857     + .type = QUIRK_COMPOSITE,
4858     + .data = (const struct snd_usb_audio_quirk[]) {
4859     + {
4860     + .ifnum = 0,
4861     + .type = QUIRK_AUDIO_STANDARD_MIXER,
4862     + },
4863     + /* Capture */
4864     + {
4865     + .ifnum = 1,
4866     + .type = QUIRK_IGNORE_INTERFACE,
4867     + },
4868     + /* Playback */
4869     + {
4870     + .ifnum = 2,
4871     + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
4872     + .data = &(const struct audioformat) {
4873     + .formats = SNDRV_PCM_FMTBIT_S16_LE,
4874     + .channels = 2,
4875     + .iface = 2,
4876     + .altsetting = 1,
4877     + .altset_idx = 1,
4878     + .attributes = UAC_EP_CS_ATTR_FILL_MAX |
4879     + UAC_EP_CS_ATTR_SAMPLE_RATE,
4880     + .endpoint = 0x03,
4881     + .ep_attr = USB_ENDPOINT_XFER_ISOC,
4882     + .rates = SNDRV_PCM_RATE_48000,
4883     + .rate_min = 48000,
4884     + .rate_max = 48000,
4885     + .nr_rates = 1,
4886     + .rate_table = (unsigned int[]) {
4887     + 48000
4888     + }
4889     + }
4890     + },
4891     + }
4892     + }
4893     +},
4894     +
4895     #undef USB_DEVICE_VENDOR_SPEC
4896     diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
4897     index a0951505c7f5..697872d8308e 100644
4898     --- a/sound/x86/intel_hdmi_audio.c
4899     +++ b/sound/x86/intel_hdmi_audio.c
4900     @@ -1827,6 +1827,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
4901     ctx->port = port;
4902     ctx->pipe = -1;
4903    
4904     + spin_lock_init(&ctx->had_spinlock);
4905     + mutex_init(&ctx->mutex);
4906     INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
4907    
4908     ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
4909     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4910     index 210bf820385a..e536977e7b6d 100644
4911     --- a/virt/kvm/kvm_main.c
4912     +++ b/virt/kvm/kvm_main.c
4913     @@ -974,8 +974,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
4914     /* Check for overlaps */
4915     r = -EEXIST;
4916     kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
4917     - if ((slot->id >= KVM_USER_MEM_SLOTS) ||
4918     - (slot->id == id))
4919     + if (slot->id == id)
4920     continue;
4921     if (!((base_gfn + npages <= slot->base_gfn) ||
4922     (base_gfn >= slot->base_gfn + slot->npages)))