Magellan Linux

/trunk/kernel-alx/patches-4.14/0124-4.14.25-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 136084 byte(s)
-added up to patches-4.14.79
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 77f4de59dc9c..d499676890d8 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 min: Minimal size of receive buffer used by TCP sockets.
 It is guaranteed to each TCP socket, even under moderate memory
 pressure.
- Default: 1 page
+ Default: 4K

 default: initial size of receive buffer used by TCP sockets.
 This value overrides net.core.rmem_default used by other protocols.
@@ -666,7 +666,7 @@ tcp_window_scaling - BOOLEAN
 tcp_wmem - vector of 3 INTEGERs: min, default, max
 min: Amount of memory reserved for send buffers for TCP sockets.
 Each TCP socket has rights to use it due to fact of its birth.
- Default: 1 page
+ Default: 4K

 default: initial size of send buffer used by TCP sockets. This
 value overrides net.core.wmem_default used by other protocols.
diff --git a/Makefile b/Makefile
index 38acc6047d7d..0fdae0f455ef 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 24
+SUBLEVEL = 25
 EXTRAVERSION =
 NAME = Petit Gorille

diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 4f2c5ec75714..e262fa9ef334 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -97,6 +97,8 @@
 };

 &i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
 clock-frequency = <2600000>;

 twl: twl@48 {
@@ -215,7 +217,12 @@
 >;
 };

-
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
 };

 &omap3_pmx_wkup {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 6d89736c7b44..cf22b35f0a28 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -104,6 +104,8 @@
 };

 &i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
 clock-frequency = <2600000>;

 twl: twl@48 {
@@ -211,6 +213,12 @@
 OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
 >;
 };
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
 };

 &uart2 {
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
index 99cfae875e12..5eae4776ffde 100644
--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -110,26 +110,6 @@
 };
 };

-&cpu0 {
- cpu0-supply = <&vdd_cpu>;
- operating-points = <
- /* KHz uV */
- 1800000 1400000
- 1608000 1350000
- 1512000 1300000
- 1416000 1200000
- 1200000 1100000
- 1008000 1050000
- 816000 1000000
- 696000 950000
- 600000 900000
- 408000 900000
- 312000 900000
- 216000 900000
- 126000 900000
- >;
-};
-
 &emmc {
 status = "okay";
 bus-width = <8>;
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 5638ce0c9524..63d6b404d88e 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

 KVM=../../../../virt/kvm

+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
index 111bda8cdebd..be4b8b0a40ad 100644
--- a/arch/arm/kvm/hyp/banked-sr.c
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@

 #include <asm/kvm_hyp.h>

+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension virt");

 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9b49867154bf..63fa79f9f121 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -42,7 +42,7 @@ config MACH_ARMADA_375
 depends on ARCH_MULTI_V7
 select ARMADA_370_XP_IRQ
 select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
 select ARM_GIC
 select ARMADA_375_CLK
 select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
 bool "Marvell Armada 380/385 boards"
 depends on ARCH_MULTI_V7
 select ARM_ERRATA_720789
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
 select ARM_GIC
 select ARM_GLOBAL_TIMER
 select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index aff6994950ba..a2399fd66e97 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
 * Ethernet switch
 ****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
- orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+ .bus_id = "orion-mii",
+ .modalias = "mv88e6085",
+};

 void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
- struct mdio_board_info *bd;
 unsigned int i;

 if (!IS_BUILTIN(CONFIG_PHYLIB))
 return;

- for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
- if (!strcmp(d->port_names[i], "cpu"))
+ for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+ if (!strcmp(d->port_names[i], "cpu")) {
+ d->netdev[i] = &orion_ge00.dev;
 break;
+ }
+ }

- bd = &orion_ge00_switch_board_info;
- bd->bus_id = orion_ge00_mvmdio_bus_name;
- bd->mdio_addr = d->sw_addr;
- d->netdev[i] = &orion_ge00.dev;
- strcpy(bd->modalias, "mv88e6085");
- bd->platform_data = d;
+ orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+ orion_ge00_switch_board_info.platform_data = d;

 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 3742508cc534..bd5ce31936f5 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);

diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 0e6ab6e4a4e9..2dbe5580a1a4 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
 #define parisc_requires_coherency() (0)
 #endif

+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */

 #endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 19c0c141bc3f..79089778725b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 unsigned long end)
 {
- unsigned long flags, size;
+ unsigned long flags;

- size = (end - start);
- if (size >= parisc_tlb_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_tlb_flush_threshold) {
 flush_tlb_all();
 return 1;
 }
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
 struct vm_area_struct *vma;
 pgd_t *pgd;

- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
- flush_tlb_all();
-
 /* Flushing the whole cache on each cpu takes forever on
 rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_tlb_all();
 flush_cache_all();
 return;
 }
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
 if (mm->context == mfsp(3)) {
 for (vma = mm->mmap; vma; vma = vma->vm_next) {
 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if ((vma->vm_flags & VM_EXEC) == 0)
- continue;
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 }
 return;
 }
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 unsigned long start, unsigned long end)
 {
- BUG_ON(!vma->vm_mm->context);
-
- /* Flush the TLB to avoid speculation if coherency is required. */
- if (parisc_requires_coherency())
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ end - start >= parisc_cache_flush_threshold) {
 flush_tlb_range(vma, start, end);
-
- if ((end - start) >= parisc_cache_flush_threshold
- || vma->vm_mm->context != mfsp(3)) {
 flush_cache_all();
 return;
 }
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
 flush_user_dcache_range_asm(start, end);
 if (vma->vm_flags & VM_EXEC)
 flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
 }

 void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 BUG_ON(!vma->vm_mm->context);

 if (pfn_valid(pfn)) {
- if (parisc_requires_coherency())
- flush_tlb_page(vma, vmaddr);
+ flush_tlb_page(vma, vmaddr);
 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 }
 }
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
 flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);

 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;

- if ((unsigned long)size > parisc_cache_flush_threshold)
+ if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+ (unsigned long)size >= parisc_cache_flush_threshold) {
+ flush_tlb_kernel_range(start, end);
 flush_data_cache();
- else
- flush_kernel_dcache_range_asm(start, start + size);
+ return;
+ }
+
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 2d40c4ff3f69..67b0f7532e83 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 .procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)

+ENTRY_CFI(purge_kernel_dcache_range_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: cmpb,COND(<<),n %r26, %r25,1b
+ pdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
 .proc
 .callinfo NO_CALLS
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4b8fd6dc22da..f7e684560186 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 next_tick = cpuinfo->it_value;

 /* Calculate how many ticks have elapsed. */
+ now = mfctl(16);
 do {
 ++ticks_elapsed;
 next_tick += cpt;
- now = mfctl(16);
 } while (next_tick - now > cpt);

 /* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 * if one or the other wrapped. If "now" is "bigger" we'll end up
 * with a very large unsigned number.
 */
- while (next_tick - mfctl(16) > cpt)
+ now = mfctl(16);
+ while (next_tick - now > cpt)
 next_tick += cpt;

 /* Program the IT when to deliver the next interrupt.
 * Only bottom 32-bits of next_tick are writable in CR16!
 * Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 500 cycles) to the
+ * after the IT fires, so if we are too close (<= 8000 cycles) to the
 * next cycle, simply skip it.
 */
- if (next_tick - mfctl(16) <= 500)
+ if (next_tick - now <= 8000)
 next_tick += cpt;
 mtctl(next_tick, 16);

@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
 * different sockets, so mark them unstable and lower rating on
 * multi-socket SMP systems.
 */
- if (num_online_cpus() > 1) {
+ if (num_online_cpus() > 1 && !running_on_qemu) {
 int cpu;
 unsigned long cpu0_loc;
 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a832ad031cee..5185be314661 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,8 +173,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)

 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
- if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)ckc >= (s64)now)
+ return 0;
+ } else if (ckc >= now) {
 return 0;
+ }
 return ckc_interrupts_enabled(vcpu);
 }

@@ -1004,13 +1011,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)

 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
- u64 now, cputm, sltime = 0;
+ const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+ const u64 ckc = vcpu->arch.sie_block->ckc;
+ u64 cputm, sltime = 0;

 if (ckc_interrupts_enabled(vcpu)) {
- now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
- sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
- /* already expired or overflow? */
- if (!sltime || vcpu->arch.sie_block->ckc <= now)
+ if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+ if ((s64)now < (s64)ckc)
+ sltime = tod_to_ns((s64)ckc - (s64)now);
+ } else if (now < ckc) {
+ sltime = tod_to_ns(ckc - now);
+ }
+ /* already expired */
+ if (!sltime)
 return 0;
 if (cpu_timer_interrupts_enabled(vcpu)) {
 cputm = kvm_s390_get_cpu_timer(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6e3d80b2048e..f4f12ecd0cec 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -169,6 +169,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 unsigned long end);

+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+ u8 delta_idx = 0;
+
+ /*
+ * The TOD jumps by delta, we have to compensate this by adding
+ * -delta to the epoch.
+ */
+ delta = -delta;
+
+ /* sign-extension - we're adding to signed values below */
+ if ((s64)delta < 0)
+ delta_idx = -1;
+
+ scb->epoch += delta;
+ if (scb->ecd & ECD_MEF) {
+ scb->epdx += delta_idx;
+ if (scb->epoch < delta)
+ scb->epdx += 1;
+ }
+}
+
 /*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
@@ -184,13 +206,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 unsigned long long *delta = v;

 list_for_each_entry(kvm, &vm_list, vm_list) {
- kvm->arch.epoch -= *delta;
 kvm_for_each_vcpu(i, vcpu, kvm) {
- vcpu->arch.sie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+ if (i == 0) {
+ kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+ kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+ }
 if (vcpu->arch.cputm_enabled)
 vcpu->arch.cputm_start += *delta;
 if (vcpu->arch.vsie_block)
- vcpu->arch.vsie_block->epoch -= *delta;
+ kvm_clock_sync_scb(vcpu->arch.vsie_block,
+ *delta);
 }
 }
 return NOTIFY_OK;
@@ -888,12 +914,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 return -EFAULT;

- if (test_kvm_facility(kvm, 139))
- kvm_s390_set_tod_clock_ext(kvm, &gtod);
- else if (gtod.epoch_idx == 0)
- kvm_s390_set_tod_clock(kvm, gtod.tod);
- else
+ if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 return -EINVAL;
+ kvm_s390_set_tod_clock(kvm, &gtod);

 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 gtod.epoch_idx, gtod.tod);
@@ -918,13 +941,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)

 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
- u64 gtod;
+ struct kvm_s390_vm_tod_clock gtod = { 0 };

- if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+ sizeof(gtod.tod)))
 return -EFAULT;

- kvm_s390_set_tod_clock(kvm, gtod);
- VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+ kvm_s390_set_tod_clock(kvm, &gtod);
+ VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 return 0;
 }

@@ -2359,6 +2383,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 mutex_lock(&vcpu->kvm->lock);
 preempt_disable();
 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 preempt_enable();
 mutex_unlock(&vcpu->kvm->lock);
 if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -2945,8 +2970,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 return 0;
 }

-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod)
 {
 struct kvm_vcpu *vcpu;
 struct kvm_s390_tod_clock_ext htod;
@@ -2958,10 +2983,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 get_tod_clock_ext((char *)&htod);

 kvm->arch.epoch = gtod->tod - htod.tod;
- kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
- if (kvm->arch.epoch > gtod->tod)
- kvm->arch.epdx -= 1;
+ kvm->arch.epdx = 0;
+ if (test_kvm_facility(kvm, 139)) {
+ kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+ if (kvm->arch.epoch > gtod->tod)
+ kvm->arch.epdx -= 1;
+ }

 kvm_s390_vcpu_block_all(kvm);
 kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -2974,22 +3001,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 mutex_unlock(&kvm->lock);
 }

-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- mutex_lock(&kvm->lock);
- preempt_disable();
- kvm->arch.epoch = tod - get_tod_clock();
- kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.sie_block->epoch = kvm->arch.epoch;
- kvm_s390_vcpu_unblock_all(kvm);
- preempt_enable();
- mutex_unlock(&kvm->lock);
-}
-
 /**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9f8fdd7b2311..e22d94f494a7 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -272,9 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 int handle_sthyi(struct kvm_vcpu *vcpu);

 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
- const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+ const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 7bd3a59232f0..734283a21677 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -84,9 +84,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+ struct kvm_s390_vm_tod_clock gtod = { 0 };
 int rc;
 u8 ar;
- u64 op2, val;
+ u64 op2;

 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -94,12 +95,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 if (op2 & 7) /* Operand must be on a doubleword boundary */
 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+ rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
 if (rc)
 return kvm_s390_inject_prog_cond(vcpu, rc);

- VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
- kvm_s390_set_tod_clock(vcpu->kvm, val);
+ VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+ kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

 kvm_s390_set_psw_cc(vcpu, 0);
 return 0;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8b8f1f14a0bf..5c790e93657d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
 pmdval_t v = native_pmd_val(pmd);

- return __pmd(v | set);
+ return native_make_pmd(v | set);
 }

 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 {
 pmdval_t v = native_pmd_val(pmd);

- return __pmd(v & ~clear);
+ return native_make_pmd(v & ~clear);
 }

 static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 {
 pudval_t v = native_pud_val(pud);

- return __pud(v | set);
+ return native_make_pud(v | set);
 }

 static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 {
 pudval_t v = native_pud_val(pud);

- return __pud(v & ~clear);
+ return native_make_pud(v & ~clear);
 }

 static inline pud_t pud_mkold(pud_t pud)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e55466760ff8..b3ec519e3982 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
+void sync_initial_page_table(void);

 /*
 * Define this if things work differently on an i386 and an i486:
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 81462e9a34f6..1149d2112b2e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
 #define swapper_pg_dir init_top_pgt

 extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }

 #define pte_ERROR(e) \
 pr_err("%s:%d: bad pte %p(%016lx)\n", \
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 3696398a9475..246f15b4e64c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 #else
 #include <asm-generic/pgtable-nopud.h>

+static inline pud_t native_make_pud(pudval_t val)
+{
+ return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pudval_t native_pud_val(pud_t pud)
 {
 return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 #else
 #include <asm-generic/pgtable-nopmd.h>

+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
 return native_pgd_val(pmd.pud.p4d.pgd);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c54361a22f59..efbcf5283520 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1238,20 +1238,13 @@ void __init setup_arch(char **cmdline_p)

 kasan_init();

-#ifdef CONFIG_X86_32
- /* sync back kernel address range */
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
 /*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
+ * Sync back kernel address range.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
 */
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();

 tboot_probe();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 497aa766fab3..ea554f812ee1 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
 /* Setup cpu initialized, callin, callout masks */
 setup_cpu_local_masks();

-#ifdef CONFIG_X86_32
 /*
 * Sync back kernel address range again. We already did this in
 * setup_arch(), but percpu data also needs to be available in
 * the smpboot asm. We can't reliably pick up percpu mappings
 * using vmalloc_fault(), because exception dispatch needs
 * percpu data.
+ *
+ * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+ * this call?
 */
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-
- /*
- * sync back low identity map too. It is used for example
- * in the 32-bit EFI stub.
- */
- clone_pgd_range(initial_page_table,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+ sync_initial_page_table();
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ef03efba1c23..8cfdb6484fd0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1944,14 +1944,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)

 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
- struct kvm_lapic *apic;
+ struct kvm_lapic *apic = vcpu->arch.apic;
 int i;

- apic_debug("%s\n", __func__);
+ if (!apic)
+ return;

- ASSERT(vcpu);
- apic = vcpu->arch.apic;
- ASSERT(apic != NULL);
+ apic_debug("%s\n", __func__);

 /* Stop the timer in case it's a reset to an active apic */
 hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2107,7 +2106,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 */
 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
- kvm_lapic_reset(vcpu, false);
 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

 return 0;
@@ -2511,7 +2509,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)

 pe = xchg(&apic->pending_events, 0);
 if (test_bit(KVM_APIC_INIT, &pe)) {
- kvm_lapic_reset(vcpu, true);
 kvm_vcpu_reset(vcpu, true);
 if (kvm_vcpu_is_bsp(apic->vcpu))
 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca000fc644bc..2b6f8a4f2731 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,20 @@ module_param(dbg, bool, 0644);
 /* make pte_list_desc fit well in cache line */
 #define PTE_LIST_EXT 3

+/*
+ * Return values of handle_mmio_page_fault and mmu.page_fault:
+ * RET_PF_RETRY: let CPU fault again on the address.
+ * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ *
+ * For handle_mmio_page_fault only:
+ * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
+ */
+enum {
+ RET_PF_RETRY = 0,
+ RET_PF_EMULATE = 1,
+ RET_PF_INVALID = 2,
+};
+
 struct pte_list_desc {
 u64 *sptes[PTE_LIST_EXT];
 struct pte_list_desc *more;
@@ -2794,13 +2808,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 return ret;
 }

-static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
- int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
- bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
+ int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
+ bool speculative, bool host_writable)
 {
 int was_rmapped = 0;
 int rmap_count;
- bool emulate = false;
+ int ret = RET_PF_RETRY;

 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
 *sptep, write_fault, gfn);
@@ -2830,12 +2844,12 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
 true, host_writable)) {
 if (write_fault)
- emulate = true;
+ ret = RET_PF_EMULATE;
 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }

 if (unlikely(is_mmio_spte(*sptep)))
- emulate = true;
+ ret = RET_PF_EMULATE;

 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
@@ -2855,7 +2869,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,

 kvm_release_pfn_clean(pfn);

- return emulate;
+ return ret;
 }

 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2994,17 +3008,16 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 * Do not cache the mmio info caused by writing the readonly gfn
 * into the spte otherwise read access on readonly gfn also can
 * caused mmio page fault and treat it as mmio access.
- * Return 1 to tell kvm to emulate it.
 */
 if (pfn == KVM_PFN_ERR_RO_FAULT)
- return 1;
+ return RET_PF_EMULATE;

 if (pfn == KVM_PFN_ERR_HWPOISON) {
 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
- return 0;
+ return RET_PF_RETRY;
 }

- return -EFAULT;
+ return RET_PF_EMULATE;
 }

 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -3286,13 +3299,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 }

 if (fast_page_fault(vcpu, v, level, error_code))
- return 0;
+ return RET_PF_RETRY;

 mmu_seq = vcpu->kvm->mmu_notifier_seq;
 smp_rmb();

 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;

 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
 return r;
@@ -3312,7 +3325,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 out_unlock:
 spin_unlock(&vcpu->kvm->mmu_lock);
 kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
 }


@@ -3659,54 +3672,38 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 return reserved;
 }

-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- * directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- * fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
- RET_MMIO_PF_EMULATE = 1,
- RET_MMIO_PF_INVALID = 2,
- RET_MMIO_PF_RETRY = 0,
- RET_MMIO_PF_BUG = -1
-};
-
 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 u64 spte;
 bool reserved;

 if (mmio_info_in_cache(vcpu, addr, direct))
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;

 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
 if (WARN_ON(reserved))
- return RET_MMIO_PF_BUG;
+ return -EINVAL;

 if (is_mmio_spte(spte)) {
 gfn_t gfn = get_mmio_spte_gfn(spte);
 unsigned access = get_mmio_spte_access(spte);

 if (!check_mmio_spte(vcpu, spte))
- return RET_MMIO_PF_INVALID;
+ return RET_PF_INVALID;

 if (direct)
 addr = 0;

 trace_handle_mmio_page_fault(addr, gfn, access);
 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;
 }

 /*
 * If the page table is zapped by other cpus, let CPU fault again on
 * the address.
 */
- return RET_MMIO_PF_RETRY;
+ return RET_PF_RETRY;
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);

@@ -3756,7 +3753,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);

 if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;

 r = mmu_topup_memory_caches(vcpu);
 if (r)
@@ -3877,7 +3874,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

 if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;

 r = mmu_topup_memory_caches(vcpu);
 if (r)
@@ -3894,13 +3891,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 }

 if (fast_page_fault(vcpu, gpa, level, error_code))
- return 0;
+ return RET_PF_RETRY;

 mmu_seq = vcpu->kvm->mmu_notifier_seq;
 smp_rmb();

 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;

 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
 return r;
@@ -3920,7 +3917,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 out_unlock:
 spin_unlock(&vcpu->kvm->mmu_lock);
 kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
 }

 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
@@ -4919,25 +4916,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 vcpu->arch.gpa_val = cr2;
 }

+ r = RET_PF_INVALID;
 if (unlikely(error_code & PFERR_RSVD_MASK)) {
 r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_MMIO_PF_EMULATE) {
+ if (r == RET_PF_EMULATE) {
 emulation_type = 0;
 goto emulate;
 }
- if (r == RET_MMIO_PF_RETRY)
- return 1;
- if (r < 0)
- return r;
- /* Must be RET_MMIO_PF_INVALID. */
 }

- r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
- false);
+ if (r == RET_PF_INVALID) {
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+ false);
+ WARN_ON(r == RET_PF_INVALID);
+ }
+
+ if (r == RET_PF_RETRY)
+ return 1;
 if (r < 0)
 return r;
- if (!r)
- return 1;

 /*
 * Before emulating the instruction, check if the error code
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index f18d1f8d332b..5abae72266b7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 struct kvm_mmu_page *sp = NULL;
 struct kvm_shadow_walk_iterator it;
 unsigned direct_access, access = gw->pt_access;
- int top_level, emulate;
+ int top_level, ret;

 direct_access = gw->pte_access;

@@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 }

 clear_sp_write_flooding_count(it.sptep);
- emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
- it.level, gw->gfn, pfn, prefault, map_writable);
+ ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
+ it.level, gw->gfn, pfn, prefault, map_writable);
 FNAME(pte_prefetch)(vcpu, gw, it.sptep);

- return emulate;
+ return ret;

 out_gpte_changed:
 kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
 }

 /*
@@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 if (!prefault)
 inject_page_fault(vcpu, &walker.fault);

- return 0;
+ return RET_PF_RETRY;
 }

 if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
 shadow_page_table_clear_flood(vcpu, addr);
- return 1;
+ return RET_PF_EMULATE;
 }

 vcpu->arch.write_fault_to_shadow_pgtable = false;
@@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
 &map_writable))
- return 0;
+ return RET_PF_RETRY;

 if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
 return r;
@@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 out_unlock:
 spin_unlock(&vcpu->kvm->mmu_lock);
 kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
 }

 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e0bc3ad0f6cd..9fb0daf628cb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include <asm/virtext.h>
@@ -5015,7 +5016,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 * being speculatively taken.
 */
 if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

 asm volatile (
 "push %%" _ASM_BP "; \n\t"
@@ -5124,11 +5125,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 * save it.
 */
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

 if (svm->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

 /* Eliminate branch target predictions from guest mode */
 vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5ffde16253cb..315fccb2684b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include "trace.h"
@@ -9431,7 +9432,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 * being speculatively taken.
 */
 if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

 vmx->__launched = vmx->loaded_vmcs->launched;
 asm(
@@ -9566,11 +9567,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 * save it.
 */
- if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
- rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

 if (vmx->spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

 /* Eliminate branch target predictions from guest mode */
 vmexit_fill_RSB();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0dcd7bf45dc1..b9afb4784d12 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7482,13 +7482,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);

 int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
- if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 /*
 * When EFER.LME and CR0.PG are set, the processor is in
 * 64-bit mode (though maybe in a 32-bit code segment).
 * CR4.PAE and EFER.LMA must be set.
 */
- if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+ if (!(sregs->cr4 & X86_CR4_PAE)
 || !(sregs->efer & EFER_LMA))
 return -EINVAL;
 } else {
@@ -7821,6 +7821,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
+ kvm_lapic_reset(vcpu, init_event);
+
 vcpu->arch.hflags = 0;

 vcpu->arch.smi_pending = 0;
@@ -8249,10 +8251,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 return r;
 }

- if (!size) {
- r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
- WARN_ON(r < 0);
- }
+ if (!size)
+ vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);

 return 0;
 }
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index b9283cc27622..476d810639a8 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)

 for_each_possible_cpu(cpu)
 setup_cpu_entry_area(cpu);
+
+ /*
+ * This is the last essential update to swapper_pgdir which needs
+ * to be synchronized to initial_page_table on 32bit.
+ */
+ sync_initial_page_table();
 }
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 135c9a7898c7..3141e67ec24c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */

+void __init sync_initial_page_table(void)
+{
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
 unsigned long pfn, va;
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 86676cec99a1..09dd7f3cf621 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)

 static void intel_mid_reboot(void)
 {
- intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
 }

 static unsigned long __init intel_mid_calibrate_tsc(void)
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 92bf5ecb6baf..3e3a58ea669e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,12 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
 #include <linux/tick.h>
+#include <linux/percpu-defs.h>

 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>

+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/fixmap.h>
@@ -15,6 +18,8 @@
 #include "mmu.h"
 #include "pmu.h"

+static DEFINE_PER_CPU(u64, spec_ctrl);
+
 void xen_arch_pre_suspend(void)
 {
 if (xen_pv_domain())
@@ -31,6 +36,9 @@ void xen_arch_post_suspend(int cancelled)

 static void xen_vcpu_notify_restore(void *data)
 {
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
 /* Boot processor notified via generic timekeeping_resume() */
 if (smp_processor_id() == 0)
 return;
@@ -40,7 +48,15 @@ static void xen_vcpu_notify_restore(void *data)

 static void xen_vcpu_notify_suspend(void *data)
 {
+ u64 tmp;
+
 tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
 }

 void xen_arch_resume(void)
diff --git a/block/blk-core.c b/block/blk-core.c
index 95b7ea996ac2..c01f4907dbbc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2277,7 +2277,7 @@ blk_qc_t submit_bio(struct bio *bio)
 unsigned int count;

 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = queue_logical_block_size(bio->bi_disk->queue);
+ count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
 else
 count = bio_sectors(bio);

diff --git a/block/blk-mq.c b/block/blk-mq.c
index b60798a30ea2..f1fb126a3be5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -638,7 +638,6 @@ static void __blk_mq_requeue_request(struct request *rq)

 trace_block_rq_requeue(q, rq);
 wbt_requeue(q->rq_wb, &rq->issue_stat);
- blk_mq_sched_requeue_request(rq);

 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -650,6 +649,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
 __blk_mq_requeue_request(rq);

+ /* this request will be re-inserted to io scheduler queue */
+ blk_mq_sched_requeue_request(rq);
+
 BUG_ON(blk_queued_rq(rq));
 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index f58cab82105b..09cd5cf2e459 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -814,6 +814,7 @@ static struct elevator_type kyber_sched = {
 .limit_depth = kyber_limit_depth,
 .prepare_request = kyber_prepare_request,
 .finish_request = kyber_finish_request,
+ .requeue_request = kyber_finish_request,
 .completed_request = kyber_completed_request,
 .dispatch_request = kyber_dispatch_request,
 .has_work = kyber_has_work,
1473     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
1474     index 4d0979e02a28..b6d58cc58f5f 100644
1475     --- a/drivers/acpi/bus.c
1476     +++ b/drivers/acpi/bus.c
1477     @@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
1478     return 0;
1479     }
1480     #endif
1481     +static int set_gbl_term_list(const struct dmi_system_id *id)
1482     +{
1483     + acpi_gbl_parse_table_as_term_list = 1;
1484     + return 0;
1485     +}
1486    
1487     -static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1488     +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
1489     + /*
1490     + * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
1491     + * mode.
1492     + * https://bugzilla.kernel.org/show_bug.cgi?id=198515
1493     + */
1494     + {
1495     + .callback = set_gbl_term_list,
1496     + .ident = "Dell Precision M5530",
1497     + .matches = {
1498     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1499     + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
1500     + },
1501     + },
1502     + {
1503     + .callback = set_gbl_term_list,
1504     + .ident = "Dell XPS 15 9570",
1505     + .matches = {
1506     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1507     + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
1508     + },
1509     + },
1510     /*
1511     * Invoke DSDT corruption work-around on all Toshiba Satellite.
1512     + * DSDT will be copied to memory.
1513     * https://bugzilla.kernel.org/show_bug.cgi?id=14679
1514     */
1515     {
1516     @@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1517     {}
1518     };
1519     #else
1520     -static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
1521     +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
1522     {}
1523     };
1524     #endif
1525     @@ -1001,11 +1028,8 @@ void __init acpi_early_init(void)
1526    
1527     acpi_permanent_mmap = true;
1528    
1529     - /*
1530     - * If the machine falls into the DMI check table,
1531     - * DSDT will be copied to memory
1532     - */
1533     - dmi_check_system(dsdt_dmi_table);
1534     + /* Check machine-specific quirks */
1535     + dmi_check_system(acpi_quirks_dmi_table);
1536    
1537     status = acpi_reallocate_root_table();
1538     if (ACPI_FAILURE(status)) {
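This hunk and the btusb change below both use the kernel's DMI quirk-table pattern: an array of struct dmi_system_id ending in an empty entry, walked by dmi_check_system(), which runs the callback of every matching entry until one returns non-zero. A minimal sketch of the pattern, with a hypothetical vendor/product pair:

    #include <linux/dmi.h>
    #include <linux/printk.h>

    static int apply_quirk(const struct dmi_system_id *id)
    {
            pr_info("applying quirk for %s\n", id->ident);
            return 1;       /* non-zero stops the table walk here */
    }

    static const struct dmi_system_id quirk_table[] = {
            {
                    .callback = apply_quirk,
                    .ident = "Example Vendor Example Model",  /* hypothetical */
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
                    },
            },
            { }             /* terminating entry */
    };

    /* typically invoked once during init: dmi_check_system(quirk_table); */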
1539     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1540     index d54c3f6f728c..673698c7b143 100644
1541     --- a/drivers/bluetooth/btusb.c
1542     +++ b/drivers/bluetooth/btusb.c
1543     @@ -21,6 +21,7 @@
1544     *
1545     */
1546    
1547     +#include <linux/dmi.h>
1548     #include <linux/module.h>
1549     #include <linux/usb.h>
1550     #include <linux/usb/quirks.h>
1551     @@ -381,6 +382,21 @@ static const struct usb_device_id blacklist_table[] = {
1552     { } /* Terminating entry */
1553     };
1554    
1555     +/* The Bluetooth USB module built into some devices needs to be reset on resume,
1556     + * this is a problem with the platform (likely shutting off all power) not with
1557     + * the module itself. So we use a DMI list to match known broken platforms.
1558     + */
1559     +static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
1560     + {
1561     + /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
1562     + .matches = {
1563     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1564     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
1565     + },
1566     + },
1567     + {}
1568     +};
1569     +
1570     #define BTUSB_MAX_ISOC_FRAMES 10
1571    
1572     #define BTUSB_INTR_RUNNING 0
1573     @@ -3013,6 +3029,9 @@ static int btusb_probe(struct usb_interface *intf,
1574     hdev->send = btusb_send_frame;
1575     hdev->notify = btusb_notify;
1576    
1577     + if (dmi_check_system(btusb_needs_reset_resume_table))
1578     + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
1579     +
1580     #ifdef CONFIG_PM
1581     err = btusb_config_oob_wake(hdev);
1582     if (err)
1583     @@ -3099,12 +3118,6 @@ static int btusb_probe(struct usb_interface *intf,
1584     if (id->driver_info & BTUSB_QCA_ROME) {
1585     data->setup_on_usb = btusb_setup_qca;
1586     hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
1587     -
1588     - /* QCA Rome devices lose their updated firmware over suspend,
1589     - * but the USB hub doesn't notice any status change.
1590     - * explicitly request a device reset on resume.
1591     - */
1592     - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
1593     }
1594    
1595     #ifdef CONFIG_BT_HCIBTUSB_RTL
1596     diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
1597     index 4d1dc8b46877..f95b9c75175b 100644
1598     --- a/drivers/char/tpm/st33zp24/st33zp24.c
1599     +++ b/drivers/char/tpm/st33zp24/st33zp24.c
1600     @@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
1601     size_t count)
1602     {
1603     int size = 0;
1604     - int expected;
1605     + u32 expected;
1606    
1607     if (!chip)
1608     return -EBUSY;
1609     @@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
1610     }
1611    
1612     expected = be32_to_cpu(*(__be32 *)(buf + 2));
1613     - if (expected > count) {
1614     + if (expected > count || expected < TPM_HEADER_SIZE) {
1615     size = -EIO;
1616     goto out;
1617     }
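Every recv path touched in this series parses the response length from the big-endian 32-bit field at offset 2 of the TPM header, and now rejects values outside [TPM_HEADER_SIZE, count], closing both the overrun and the underflow case. A user-space illustration of the parse-and-validate step; the 10-byte header size matches the kernel's TPM_HEADER_SIZE:

    #include <arpa/inet.h>  /* ntohl, the user-space twin of be32_to_cpu */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TPM_HEADER_SIZE 10

    static int parse_response_len(const uint8_t *buf, size_t count)
    {
            uint32_t expected;

            if (count < TPM_HEADER_SIZE)
                    return -1;
            memcpy(&expected, buf + 2, sizeof(expected));
            expected = ntohl(expected);
            /* Too big for the caller's buffer, or smaller than the header
             * itself: either way the reply is corrupt (or hostile). */
            if (expected > count || expected < TPM_HEADER_SIZE)
                    return -1;
            return (int)expected;
    }

    int main(void)
    {
            uint8_t ok[12]  = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c };
            uint8_t bad[12] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x02 };

            printf("%d %d\n", parse_response_len(ok, sizeof(ok)),
                   parse_response_len(bad, sizeof(bad)));   /* prints: 12 -1 */
            return 0;
    }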
1618     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
1619     index 1d6729be4cd6..3cec403a80b3 100644
1620     --- a/drivers/char/tpm/tpm-interface.c
1621     +++ b/drivers/char/tpm/tpm-interface.c
1622     @@ -1228,6 +1228,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
1623     break;
1624    
1625     recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
1626     + if (recd > num_bytes) {
1627     + total = -EFAULT;
1628     + break;
1629     + }
1630    
1631     rlength = be32_to_cpu(tpm_cmd.header.out.length);
1632     if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
1633     diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
1634     index e1a41b788f08..44a3d16231f6 100644
1635     --- a/drivers/char/tpm/tpm2-cmd.c
1636     +++ b/drivers/char/tpm/tpm2-cmd.c
1637     @@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
1638     if (!rc) {
1639     data_len = be16_to_cpup(
1640     (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
1641     + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
1642     + rc = -EFAULT;
1643     + goto out;
1644     + }
1645    
1646     rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
1647     ->header.out.length);
1648     diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1649     index 79d6bbb58e39..d5b44cadac56 100644
1650     --- a/drivers/char/tpm/tpm_i2c_infineon.c
1651     +++ b/drivers/char/tpm/tpm_i2c_infineon.c
1652     @@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
1653     static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1654     {
1655     int size = 0;
1656     - int expected, status;
1657     + int status;
1658     + u32 expected;
1659    
1660     if (count < TPM_HEADER_SIZE) {
1661     size = -EIO;
1662     @@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1663     }
1664    
1665     expected = be32_to_cpu(*(__be32 *)(buf + 2));
1666     - if ((size_t) expected > count) {
1667     + if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
1668     size = -EIO;
1669     goto out;
1670     }
1671     diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1672     index c6428771841f..caa86b19c76d 100644
1673     --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1674     +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1675     @@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1676     struct device *dev = chip->dev.parent;
1677     struct i2c_client *client = to_i2c_client(dev);
1678     s32 rc;
1679     - int expected, status, burst_count, retries, size = 0;
1680     + int status;
1681     + int burst_count;
1682     + int retries;
1683     + int size = 0;
1684     + u32 expected;
1685    
1686     if (count < TPM_HEADER_SIZE) {
1687     i2c_nuvoton_ready(chip); /* return to idle */
1688     @@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1689     * to machine native
1690     */
1691     expected = be32_to_cpu(*(__be32 *) (buf + 2));
1692     - if (expected > count) {
1693     + if (expected > count || expected < size) {
1694     dev_err(dev, "%s() expected > count\n", __func__);
1695     size = -EIO;
1696     continue;
1697     diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
1698     index 7e55aa9ce680..ebd0e75a3e4d 100644
1699     --- a/drivers/char/tpm/tpm_tis.c
1700     +++ b/drivers/char/tpm/tpm_tis.c
1701     @@ -223,7 +223,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1702     }
1703    
1704     static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
1705     - u8 *value)
1706     + const u8 *value)
1707     {
1708     struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
1709    
1710     diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1711     index 63bc6c3b949e..083578b2517e 100644
1712     --- a/drivers/char/tpm/tpm_tis_core.c
1713     +++ b/drivers/char/tpm/tpm_tis_core.c
1714     @@ -202,7 +202,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1715     {
1716     struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1717     int size = 0;
1718     - int expected, status;
1719     + int status;
1720     + u32 expected;
1721    
1722     if (count < TPM_HEADER_SIZE) {
1723     size = -EIO;
1724     @@ -217,7 +218,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1725     }
1726    
1727     expected = be32_to_cpu(*(__be32 *) (buf + 2));
1728     - if (expected > count) {
1729     + if (expected > count || expected < TPM_HEADER_SIZE) {
1730     size = -EIO;
1731     goto out;
1732     }
1733     @@ -252,7 +253,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1734     * tpm.c can skip polling for the data to be available as the interrupt is
1735     * waited for here
1736     */
1737     -static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
1738     +static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
1739     {
1740     struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1741     int rc, status, burstcnt;
1742     @@ -343,7 +344,7 @@ static void disable_interrupts(struct tpm_chip *chip)
1743     * tpm.c can skip polling for the data to be available as the interrupt is
1744     * waited for here
1745     */
1746     -static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
1747     +static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
1748     {
1749     struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
1750     int rc;
1751     diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
1752     index e2212f021a02..6bbac319ff3b 100644
1753     --- a/drivers/char/tpm/tpm_tis_core.h
1754     +++ b/drivers/char/tpm/tpm_tis_core.h
1755     @@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
1756     int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
1757     u8 *result);
1758     int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
1759     - u8 *value);
1760     + const u8 *value);
1761     int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
1762     int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
1763     int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
1764     @@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
1765     }
1766    
1767     static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
1768     - u16 len, u8 *value)
1769     + u16 len, const u8 *value)
1770     {
1771     return data->phy_ops->write_bytes(data, addr, len, value);
1772     }
1773     diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1774     index 88fe72ae967f..8ab0bd8445f6 100644
1775     --- a/drivers/char/tpm/tpm_tis_spi.c
1776     +++ b/drivers/char/tpm/tpm_tis_spi.c
1777     @@ -46,9 +46,7 @@
1778     struct tpm_tis_spi_phy {
1779     struct tpm_tis_data priv;
1780     struct spi_device *spi_device;
1781     -
1782     - u8 tx_buf[4];
1783     - u8 rx_buf[4];
1784     + u8 *iobuf;
1785     };
1786    
1787     static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
1788     @@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
1789     }
1790    
1791     static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1792     - u8 *buffer, u8 direction)
1793     + u8 *in, const u8 *out)
1794     {
1795     struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
1796     int ret = 0;
1797     @@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1798     while (len) {
1799     transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
1800    
1801     - phy->tx_buf[0] = direction | (transfer_len - 1);
1802     - phy->tx_buf[1] = 0xd4;
1803     - phy->tx_buf[2] = addr >> 8;
1804     - phy->tx_buf[3] = addr;
1805     + phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
1806     + phy->iobuf[1] = 0xd4;
1807     + phy->iobuf[2] = addr >> 8;
1808     + phy->iobuf[3] = addr;
1809    
1810     memset(&spi_xfer, 0, sizeof(spi_xfer));
1811     - spi_xfer.tx_buf = phy->tx_buf;
1812     - spi_xfer.rx_buf = phy->rx_buf;
1813     + spi_xfer.tx_buf = phy->iobuf;
1814     + spi_xfer.rx_buf = phy->iobuf;
1815     spi_xfer.len = 4;
1816     spi_xfer.cs_change = 1;
1817    
1818     @@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1819     if (ret < 0)
1820     goto exit;
1821    
1822     - if ((phy->rx_buf[3] & 0x01) == 0) {
1823     + if ((phy->iobuf[3] & 0x01) == 0) {
1824     // handle SPI wait states
1825     - phy->tx_buf[0] = 0;
1826     + phy->iobuf[0] = 0;
1827    
1828     for (i = 0; i < TPM_RETRY; i++) {
1829     spi_xfer.len = 1;
1830     @@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1831     ret = spi_sync_locked(phy->spi_device, &m);
1832     if (ret < 0)
1833     goto exit;
1834     - if (phy->rx_buf[0] & 0x01)
1835     + if (phy->iobuf[0] & 0x01)
1836     break;
1837     }
1838    
1839     @@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1840     spi_xfer.len = transfer_len;
1841     spi_xfer.delay_usecs = 5;
1842    
1843     - if (direction) {
1844     + if (in) {
1845     spi_xfer.tx_buf = NULL;
1846     - spi_xfer.rx_buf = buffer;
1847     - } else {
1848     - spi_xfer.tx_buf = buffer;
1849     + } else if (out) {
1850     spi_xfer.rx_buf = NULL;
1851     + memcpy(phy->iobuf, out, transfer_len);
1852     + out += transfer_len;
1853     }
1854    
1855     spi_message_init(&m);
1856     @@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1857     if (ret < 0)
1858     goto exit;
1859    
1860     + if (in) {
1861     + memcpy(in, phy->iobuf, transfer_len);
1862     + in += transfer_len;
1863     + }
1864     +
1865     len -= transfer_len;
1866     - buffer += transfer_len;
1867     }
1868    
1869     exit:
1870     @@ -139,13 +141,13 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1871     static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
1872     u16 len, u8 *result)
1873     {
1874     - return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
1875     + return tpm_tis_spi_transfer(data, addr, len, result, NULL);
1876     }
1877    
1878     static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
1879     - u16 len, u8 *value)
1880     + u16 len, const u8 *value)
1881     {
1882     - return tpm_tis_spi_transfer(data, addr, len, value, 0);
1883     + return tpm_tis_spi_transfer(data, addr, len, NULL, value);
1884     }
1885    
1886     static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1887     @@ -194,6 +196,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
1888    
1889     phy->spi_device = dev;
1890    
1891     + phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
1892     + if (!phy->iobuf)
1893     + return -ENOMEM;
1894     +
1895     return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
1896     NULL);
1897     }
1898     diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
1899     index 7b596fa38ad2..6bebc1f9f55a 100644
1900     --- a/drivers/cpufreq/s3c24xx-cpufreq.c
1901     +++ b/drivers/cpufreq/s3c24xx-cpufreq.c
1902     @@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
1903     static int s3c_cpufreq_init(struct cpufreq_policy *policy)
1904     {
1905     policy->clk = clk_arm;
1906     - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
1907     +
1908     + policy->cpuinfo.transition_latency = cpu_cur.info->latency;
1909     +
1910     + if (ftab)
1911     + return cpufreq_table_validate_and_show(policy, ftab);
1912     +
1913     + return 0;
1914     }
1915    
1916     static int __init s3c_cpufreq_initclks(void)
1917     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1918     index cd9d6ba03579..0dc0d595c47c 100644
1919     --- a/drivers/edac/sb_edac.c
1920     +++ b/drivers/edac/sb_edac.c
1921     @@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
1922     * sbridge structs
1923     */
1924    
1925     -#define NUM_CHANNELS 4 /* Max channels per MC */
1926     +#define NUM_CHANNELS 6 /* Max channels per MC */
1927     #define MAX_DIMMS 3 /* Max DIMMS per channel */
1928     #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
1929     #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
1930     diff --git a/drivers/md/md.c b/drivers/md/md.c
1931     index 6bf093cef958..e058c209bbcf 100644
1932     --- a/drivers/md/md.c
1933     +++ b/drivers/md/md.c
1934     @@ -8522,6 +8522,10 @@ static int remove_and_add_spares(struct mddev *mddev,
1935     int removed = 0;
1936     bool remove_some = false;
1937    
1938     + if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
1939     + /* Mustn't remove devices when resync thread is running */
1940     + return 0;
1941     +
1942     rdev_for_each(rdev, mddev) {
1943     if ((this == NULL || rdev == this) &&
1944     rdev->raid_disk >= 0 &&
1945     diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
1946     index 50bce68ffd66..65d157fe76d1 100644
1947     --- a/drivers/media/dvb-frontends/m88ds3103.c
1948     +++ b/drivers/media/dvb-frontends/m88ds3103.c
1949     @@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
1950     * New users must use I2C client binding directly!
1951     */
1952     struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1953     - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
1954     + struct i2c_adapter *i2c,
1955     + struct i2c_adapter **tuner_i2c_adapter)
1956     {
1957     struct i2c_client *client;
1958     struct i2c_board_info board_info;
1959     - struct m88ds3103_platform_data pdata;
1960     + struct m88ds3103_platform_data pdata = {};
1961    
1962     pdata.clk = cfg->clock;
1963     pdata.i2c_wr_max = cfg->i2c_wr_max;
1964     @@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
1965     case M88DS3103_CHIP_ID:
1966     break;
1967     default:
1968     + ret = -ENODEV;
1969     + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
1970     goto err_kfree;
1971     }
1972    
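The '= {}' added to m88ds3103_attach() matters because pdata is an automatic variable whose contents are handed to the new i2c client: without the empty initializer, any member the function does not assign would carry indeterminate stack data. A quick stand-alone demonstration of the initializer's semantics (the empty braces are the GNU C / C23 form the kernel uses):

    #include <assert.h>

    struct pdata {
            unsigned int clk;
            void *hook;
    };

    int main(void)
    {
            struct pdata p = {};    /* every member zeroed, pointers included */

            assert(p.clk == 0);
            assert(p.hook == (void *)0);
            return 0;
    }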
1973     diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
1974     index 35026795be28..fa41d9422d57 100644
1975     --- a/drivers/mmc/host/dw_mmc-exynos.c
1976     +++ b/drivers/mmc/host/dw_mmc-exynos.c
1977     @@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
1978    
1979     static const struct dw_mci_drv_data exynos_drv_data = {
1980     .caps = exynos_dwmmc_caps,
1981     + .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
1982     .init = dw_mci_exynos_priv_init,
1983     .set_ios = dw_mci_exynos_set_ios,
1984     .parse_dt = dw_mci_exynos_parse_dt,
1985     diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
1986     index 64cda84b2302..864e7fcaffaf 100644
1987     --- a/drivers/mmc/host/dw_mmc-k3.c
1988     +++ b/drivers/mmc/host/dw_mmc-k3.c
1989     @@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
1990     if (priv->ctrl_id < 0)
1991     priv->ctrl_id = 0;
1992    
1993     + if (priv->ctrl_id >= TIMING_MODE)
1994     + return -EINVAL;
1995     +
1996     host->priv = priv;
1997     return 0;
1998     }
1999     @@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
2000    
2001     static const struct dw_mci_drv_data hi6220_data = {
2002     .caps = dw_mci_hi6220_caps,
2003     + .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
2004     .switch_voltage = dw_mci_hi6220_switch_voltage,
2005     .set_ios = dw_mci_hi6220_set_ios,
2006     .parse_dt = dw_mci_hi6220_parse_dt,
2007     diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
2008     index a3f1c2b30145..339295212935 100644
2009     --- a/drivers/mmc/host/dw_mmc-rockchip.c
2010     +++ b/drivers/mmc/host/dw_mmc-rockchip.c
2011     @@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
2012    
2013     static const struct dw_mci_drv_data rk3288_drv_data = {
2014     .caps = dw_mci_rk3288_dwmmc_caps,
2015     + .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
2016     .set_ios = dw_mci_rk3288_set_ios,
2017     .execute_tuning = dw_mci_rk3288_execute_tuning,
2018     .parse_dt = dw_mci_rk3288_parse_dt,
2019     diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
2020     index d38e94ae2b85..c06b5393312f 100644
2021     --- a/drivers/mmc/host/dw_mmc-zx.c
2022     +++ b/drivers/mmc/host/dw_mmc-zx.c
2023     @@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
2024    
2025     static const struct dw_mci_drv_data zx_drv_data = {
2026     .caps = zx_dwmmc_caps,
2027     + .num_caps = ARRAY_SIZE(zx_dwmmc_caps),
2028     .execute_tuning = dw_mci_zx_execute_tuning,
2029     .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
2030     .parse_dt = dw_mci_zx_parse_dt,
2031     diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
2032     index 4f2806720c5c..60341a814055 100644
2033     --- a/drivers/mmc/host/dw_mmc.c
2034     +++ b/drivers/mmc/host/dw_mmc.c
2035     @@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
2036     {
2037     struct dw_mci *host = s->private;
2038    
2039     + pm_runtime_get_sync(host->dev);
2040     +
2041     seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
2042     seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
2043     seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
2044     @@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
2045     seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
2046     seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
2047    
2048     + pm_runtime_put_autosuspend(host->dev);
2049     +
2050     return 0;
2051     }
2052    
2053     @@ -2758,12 +2762,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2054     return IRQ_HANDLED;
2055     }
2056    
2057     +static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2058     +{
2059     + struct dw_mci *host = slot->host;
2060     + const struct dw_mci_drv_data *drv_data = host->drv_data;
2061     + struct mmc_host *mmc = slot->mmc;
2062     + int ctrl_id;
2063     +
2064     + if (host->pdata->caps)
2065     + mmc->caps = host->pdata->caps;
2066     +
2067     + /*
2068     + * Support MMC_CAP_ERASE by default.
2069     + * It needs to use trim/discard/erase commands.
2070     + */
2071     + mmc->caps |= MMC_CAP_ERASE;
2072     +
2073     + if (host->pdata->pm_caps)
2074     + mmc->pm_caps = host->pdata->pm_caps;
2075     +
2076     + if (host->dev->of_node) {
2077     + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2078     + if (ctrl_id < 0)
2079     + ctrl_id = 0;
2080     + } else {
2081     + ctrl_id = to_platform_device(host->dev)->id;
2082     + }
2083     +
2084     + if (drv_data && drv_data->caps) {
2085     + if (ctrl_id >= drv_data->num_caps) {
2086     + dev_err(host->dev, "invalid controller id %d\n",
2087     + ctrl_id);
2088     + return -EINVAL;
2089     + }
2090     + mmc->caps |= drv_data->caps[ctrl_id];
2091     + }
2092     +
2093     + if (host->pdata->caps2)
2094     + mmc->caps2 = host->pdata->caps2;
2095     +
2096     + /* Process SDIO IRQs through the sdio_irq_work. */
2097     + if (mmc->caps & MMC_CAP_SDIO_IRQ)
2098     + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2099     +
2100     + return 0;
2101     +}
2102     +
2103     static int dw_mci_init_slot(struct dw_mci *host)
2104     {
2105     struct mmc_host *mmc;
2106     struct dw_mci_slot *slot;
2107     - const struct dw_mci_drv_data *drv_data = host->drv_data;
2108     - int ctrl_id, ret;
2109     + int ret;
2110     u32 freq[2];
2111    
2112     mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2113     @@ -2797,38 +2846,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
2114     if (!mmc->ocr_avail)
2115     mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2116    
2117     - if (host->pdata->caps)
2118     - mmc->caps = host->pdata->caps;
2119     -
2120     - /*
2121     - * Support MMC_CAP_ERASE by default.
2122     - * It needs to use trim/discard/erase commands.
2123     - */
2124     - mmc->caps |= MMC_CAP_ERASE;
2125     -
2126     - if (host->pdata->pm_caps)
2127     - mmc->pm_caps = host->pdata->pm_caps;
2128     -
2129     - if (host->dev->of_node) {
2130     - ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2131     - if (ctrl_id < 0)
2132     - ctrl_id = 0;
2133     - } else {
2134     - ctrl_id = to_platform_device(host->dev)->id;
2135     - }
2136     - if (drv_data && drv_data->caps)
2137     - mmc->caps |= drv_data->caps[ctrl_id];
2138     -
2139     - if (host->pdata->caps2)
2140     - mmc->caps2 = host->pdata->caps2;
2141     -
2142     ret = mmc_of_parse(mmc);
2143     if (ret)
2144     goto err_host_allocated;
2145    
2146     - /* Process SDIO IRQs through the sdio_irq_work. */
2147     - if (mmc->caps & MMC_CAP_SDIO_IRQ)
2148     - mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2149     + ret = dw_mci_init_slot_caps(slot);
2150     + if (ret)
2151     + goto err_host_allocated;
2152    
2153     /* Useful defaults if platform data is unset. */
2154     if (host->use_dma == TRANS_MODE_IDMAC) {
2155     diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
2156     index 34474ad731aa..044c87ce6725 100644
2157     --- a/drivers/mmc/host/dw_mmc.h
2158     +++ b/drivers/mmc/host/dw_mmc.h
2159     @@ -542,6 +542,7 @@ struct dw_mci_slot {
2160     /**
2161     * dw_mci driver data - dw-mshc implementation specific driver data.
2162     * @caps: mmc subsystem specified capabilities of the controller(s).
2163     + * @num_caps: number of capabilities specified by @caps.
2164     * @init: early implementation specific initialization.
2165     * @set_ios: handle bus specific extensions.
2166     * @parse_dt: parse implementation specific device tree properties.
2167     @@ -553,6 +554,7 @@ struct dw_mci_slot {
2168     */
2169     struct dw_mci_drv_data {
2170     unsigned long *caps;
2171     + u32 num_caps;
2172     int (*init)(struct dw_mci *host);
2173     void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
2174     int (*parse_dt)(struct dw_mci *host);
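The dw_mmc series above pairs each driver's caps array with an explicit num_caps so that the controller id, which can come straight from a device-tree alias, is bounds-checked before being used as an index. A stand-alone illustration of the array-plus-length lookup (ARRAY_SIZE mirrors the kernel macro of the same name):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned long caps[4] = { 0x1, 0x2, 0x4, 0x8 };

    static int lookup_caps(int ctrl_id, unsigned long *out)
    {
            if (ctrl_id < 0 || (unsigned long)ctrl_id >= ARRAY_SIZE(caps))
                    return -1;      /* invalid controller id, as in the fix */
            *out = caps[ctrl_id];
            return 0;
    }

    int main(void)
    {
            unsigned long c;

            printf("%d\n", lookup_caps(5, &c));  /* -1: index out of range */
            printf("%d\n", lookup_caps(2, &c));  /* 0: c == 0x4 */
            return 0;
    }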
2175     diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
2176     index 67d787fa3306..070f5da06fd2 100644
2177     --- a/drivers/mmc/host/sdhci-pci-core.c
2178     +++ b/drivers/mmc/host/sdhci-pci-core.c
2179     @@ -594,9 +594,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
2180     slot->chip->rpm_retune = intel_host->d3_retune;
2181     }
2182    
2183     -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
2184     +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
2185     +{
2186     + int err = sdhci_execute_tuning(mmc, opcode);
2187     + struct sdhci_host *host = mmc_priv(mmc);
2188     +
2189     + if (err)
2190     + return err;
2191     +
2192     + /*
2193     + * Tuning can leave the IP in an active state (Buffer Read Enable bit
2194     + * set) which prevents the entry to low power states (i.e. S0i3). Data
2195     + * reset will clear it.
2196     + */
2197     + sdhci_reset(host, SDHCI_RESET_DATA);
2198     +
2199     + return 0;
2200     +}
2201     +
2202     +static void byt_probe_slot(struct sdhci_pci_slot *slot)
2203     {
2204     + struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
2205     +
2206     byt_read_dsm(slot);
2207     +
2208     + ops->execute_tuning = intel_execute_tuning;
2209     +}
2210     +
2211     +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
2212     +{
2213     + byt_probe_slot(slot);
2214     slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
2215     MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
2216     MMC_CAP_CMD_DURING_TFR |
2217     @@ -651,7 +678,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
2218     {
2219     int err;
2220    
2221     - byt_read_dsm(slot);
2222     + byt_probe_slot(slot);
2223    
2224     err = ni_set_max_freq(slot);
2225     if (err)
2226     @@ -664,7 +691,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
2227    
2228     static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
2229     {
2230     - byt_read_dsm(slot);
2231     + byt_probe_slot(slot);
2232     slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
2233     MMC_CAP_WAIT_WHILE_BUSY;
2234     return 0;
2235     @@ -672,7 +699,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
2236    
2237     static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
2238     {
2239     - byt_read_dsm(slot);
2240     + byt_probe_slot(slot);
2241     slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
2242     MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
2243     slot->cd_idx = 0;
2244     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2245     index 608693d11bd7..75c4455e2271 100644
2246     --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2247     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2248     @@ -595,7 +595,7 @@ static void xgbe_isr_task(unsigned long data)
2249    
2250     reissue_mask = 1 << 0;
2251     if (!pdata->per_channel_irq)
2252     - reissue_mask |= 0xffff < 4;
2253     + reissue_mask |= 0xffff << 4;
2254    
2255     XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
2256     }
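The one-character fix above turns a relational comparison back into the intended shift: 0xffff < 4 evaluates to 0, so none of the per-channel reissue bits were ever set, whereas 0xffff << 4 sets bits 4 through 19. A compile-and-run confirmation:

    #include <assert.h>

    int main(void)
    {
            assert((0xffff < 4) == 0);          /* comparison: always false */
            assert((0xffff << 4) == 0xffff0);   /* shift: bits 4..19 set */
            return 0;
    }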
2257     diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2258     index 3e5833cf1fab..eb23f9ba1a9a 100644
2259     --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2260     +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
2261     @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
2262     struct net_device *netdev = pdata->netdev;
2263     int ret = 0;
2264    
2265     + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
2266     +
2267     pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
2268     XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
2269    
2270     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2271     index 879a9c4cef59..29f600fd6977 100644
2272     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2273     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2274     @@ -1877,6 +1877,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
2275     ixgbe_rx_pg_size(rx_ring),
2276     DMA_FROM_DEVICE,
2277     IXGBE_RX_DMA_ATTR);
2278     + } else if (ring_uses_build_skb(rx_ring)) {
2279     + unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
2280     +
2281     + dma_sync_single_range_for_cpu(rx_ring->dev,
2282     + IXGBE_CB(skb)->dma,
2283     + offset,
2284     + skb_headlen(skb),
2285     + DMA_FROM_DEVICE);
2286     } else {
2287     struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
2288    
2289     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2290     index 3cdb932cae76..a863572882b2 100644
2291     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2292     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2293     @@ -1918,13 +1918,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2294     param->wq.linear = 1;
2295     }
2296    
2297     -static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
2298     +static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
2299     + struct mlx5e_rq_param *param)
2300     {
2301     void *rqc = param->rqc;
2302     void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2303    
2304     MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
2305     MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
2306     +
2307     + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2308     }
2309    
2310     static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2311     @@ -2778,6 +2781,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2312     struct mlx5e_cq *cq,
2313     struct mlx5e_cq_param *param)
2314     {
2315     + param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2316     + param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
2317     +
2318     return mlx5e_alloc_cq_common(mdev, param, cq);
2319     }
2320    
2321     @@ -2789,7 +2795,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2322     struct mlx5e_cq *cq = &drop_rq->cq;
2323     int err;
2324    
2325     - mlx5e_build_drop_rq_param(&rq_param);
2326     + mlx5e_build_drop_rq_param(mdev, &rq_param);
2327    
2328     err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2329     if (err)
2330     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2331     index 91b1b0938931..3476f594c195 100644
2332     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2333     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2334     @@ -36,6 +36,7 @@
2335     #include <linux/tcp.h>
2336     #include <linux/bpf_trace.h>
2337     #include <net/busy_poll.h>
2338     +#include <net/ip6_checksum.h>
2339     #include "en.h"
2340     #include "en_tc.h"
2341     #include "eswitch.h"
2342     @@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
2343     return true;
2344     }
2345    
2346     +static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
2347     +{
2348     + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
2349     + u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
2350     + (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
2351     +
2352     + tcp->check = 0;
2353     + tcp->psh = get_cqe_lro_tcppsh(cqe);
2354     +
2355     + if (tcp_ack) {
2356     + tcp->ack = 1;
2357     + tcp->ack_seq = cqe->lro_ack_seq_num;
2358     + tcp->window = cqe->lro_tcp_win;
2359     + }
2360     +}
2361     +
2362     static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
2363     u32 cqe_bcnt)
2364     {
2365     struct ethhdr *eth = (struct ethhdr *)(skb->data);
2366     struct tcphdr *tcp;
2367     int network_depth = 0;
2368     + __wsum check;
2369     __be16 proto;
2370     u16 tot_len;
2371     void *ip_p;
2372    
2373     - u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
2374     - u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
2375     - (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
2376     -
2377     skb->mac_len = ETH_HLEN;
2378     proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
2379    
2380     @@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
2381     ipv4->check = 0;
2382     ipv4->check = ip_fast_csum((unsigned char *)ipv4,
2383     ipv4->ihl);
2384     +
2385     + mlx5e_lro_update_tcp_hdr(cqe, tcp);
2386     + check = csum_partial(tcp, tcp->doff * 4,
2387     + csum_unfold((__force __sum16)cqe->check_sum));
2388     + /* Almost done, don't forget the pseudo header */
2389     + tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
2390     + tot_len - sizeof(struct iphdr),
2391     + IPPROTO_TCP, check);
2392     } else {
2393     + u16 payload_len = tot_len - sizeof(struct ipv6hdr);
2394     struct ipv6hdr *ipv6 = ip_p;
2395    
2396     tcp = ip_p + sizeof(struct ipv6hdr);
2397     skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2398    
2399     ipv6->hop_limit = cqe->lro_min_ttl;
2400     - ipv6->payload_len = cpu_to_be16(tot_len -
2401     - sizeof(struct ipv6hdr));
2402     - }
2403     -
2404     - tcp->psh = get_cqe_lro_tcppsh(cqe);
2405     -
2406     - if (tcp_ack) {
2407     - tcp->ack = 1;
2408     - tcp->ack_seq = cqe->lro_ack_seq_num;
2409     - tcp->window = cqe->lro_tcp_win;
2410     + ipv6->payload_len = cpu_to_be16(payload_len);
2411     +
2412     + mlx5e_lro_update_tcp_hdr(cqe, tcp);
2413     + check = csum_partial(tcp, tcp->doff * 4,
2414     + csum_unfold((__force __sum16)cqe->check_sum));
2415     + /* Almost done, don't forget the pseudo header */
2416     + tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
2417     + IPPROTO_TCP, check);
2418     }
2419     }
2420    
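Once LRO has coalesced several segments, the TCP checksum must be rebuilt for the merged packet: the hunk zeroes tcp->check, computes a partial sum over the rewritten TCP header seeded with the hardware checksum of the payload, and folds in the IPv4 or IPv6 pseudo header via csum_tcpudp_magic() or csum_ipv6_magic(). A user-space sketch of the IPv4 pseudo-header fold, simplified to host-order addresses:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* 'partial' is the one's-complement sum already taken over the TCP header
     * and payload with the checksum field zeroed, as in the hunk above. */
    static uint16_t tcp_v4_check(uint32_t saddr, uint32_t daddr,
                                 uint16_t tcp_len, uint32_t partial)
    {
            uint32_t sum = partial;

            sum += (saddr >> 16) + (saddr & 0xffff);
            sum += (daddr >> 16) + (daddr & 0xffff);
            sum += 6;           /* IPPROTO_TCP */
            sum += tcp_len;     /* TCP header + payload length */
            return fold(sum);
    }

    int main(void)
    {
            printf("0x%04x\n", tcp_v4_check(0xc0a80001, 0xc0a80002, 20, 0));
            return 0;
    }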
2421     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2422     index 5a4608281f38..707976482c09 100644
2423     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2424     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
2425     @@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
2426     if (iph->protocol != IPPROTO_UDP)
2427     goto out;
2428    
2429     - udph = udp_hdr(skb);
2430     + /* Don't assume skb_transport_header() was set */
2431     + udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
2432     if (udph->dest != htons(9))
2433     goto out;
2434    
2435     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2436     index 1d6925d4369a..eea7f931cad3 100644
2437     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2438     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
2439     @@ -155,7 +155,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
2440     default:
2441     hlen = mlx5e_skb_l2_header_offset(skb);
2442     }
2443     - return min_t(u16, hlen, skb->len);
2444     + return min_t(u16, hlen, skb_headlen(skb));
2445     }
2446    
2447     static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
2448     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2449     index 7bef80676464..516e63244606 100644
2450     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2451     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2452     @@ -729,26 +729,29 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
2453     static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
2454     u32 tb_id)
2455     {
2456     + struct mlxsw_sp_fib *fib4;
2457     + struct mlxsw_sp_fib *fib6;
2458     struct mlxsw_sp_vr *vr;
2459     int err;
2460    
2461     vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
2462     if (!vr)
2463     return ERR_PTR(-EBUSY);
2464     - vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
2465     - if (IS_ERR(vr->fib4))
2466     - return ERR_CAST(vr->fib4);
2467     - vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
2468     - if (IS_ERR(vr->fib6)) {
2469     - err = PTR_ERR(vr->fib6);
2470     + fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
2471     + if (IS_ERR(fib4))
2472     + return ERR_CAST(fib4);
2473     + fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
2474     + if (IS_ERR(fib6)) {
2475     + err = PTR_ERR(fib6);
2476     goto err_fib6_create;
2477     }
2478     + vr->fib4 = fib4;
2479     + vr->fib6 = fib6;
2480     vr->tb_id = tb_id;
2481     return vr;
2482    
2483     err_fib6_create:
2484     - mlxsw_sp_fib_destroy(vr->fib4);
2485     - vr->fib4 = NULL;
2486     + mlxsw_sp_fib_destroy(fib4);
2487     return ERR_PTR(err);
2488     }
2489    
2490     @@ -3029,6 +3032,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2491     struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2492     int i;
2493    
2494     + if (!list_is_singular(&nh_grp->fib_list))
2495     + return;
2496     +
2497     for (i = 0; i < nh_grp->count; i++) {
2498     struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2499    
2500     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2501     index f5863e5bec81..42a6afcaae03 100644
2502     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2503     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2504     @@ -1098,6 +1098,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2505     bool dynamic)
2506     {
2507     char *sfd_pl;
2508     + u8 num_rec;
2509     int err;
2510    
2511     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2512     @@ -1107,9 +1108,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2513     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2514     mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2515     mac, fid, action, local_port);
2516     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2517     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2518     - kfree(sfd_pl);
2519     + if (err)
2520     + goto out;
2521     +
2522     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2523     + err = -EBUSY;
2524    
2525     +out:
2526     + kfree(sfd_pl);
2527     return err;
2528     }
2529    
2530     @@ -1134,6 +1142,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2531     bool adding, bool dynamic)
2532     {
2533     char *sfd_pl;
2534     + u8 num_rec;
2535     int err;
2536    
2537     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2538     @@ -1144,9 +1153,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
2539     mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
2540     mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
2541     lag_vid, lag_id);
2542     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2543     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2544     - kfree(sfd_pl);
2545     + if (err)
2546     + goto out;
2547     +
2548     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2549     + err = -EBUSY;
2550    
2551     +out:
2552     + kfree(sfd_pl);
2553     return err;
2554     }
2555    
2556     @@ -1191,6 +1207,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
2557     u16 fid, u16 mid, bool adding)
2558     {
2559     char *sfd_pl;
2560     + u8 num_rec;
2561     int err;
2562    
2563     sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
2564     @@ -1200,7 +1217,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
2565     mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
2566     mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
2567     MLXSW_REG_SFD_REC_ACTION_NOP, mid);
2568     + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
2569     err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
2570     + if (err)
2571     + goto out;
2572     +
2573     + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
2574     + err = -EBUSY;
2575     +
2576     +out:
2577     kfree(sfd_pl);
2578     return err;
2579     }
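All three FDB operations above now record how many entries they packed into the sfd payload, write the register, and compare against the count the firmware echoes back, returning -EBUSY when the device silently consumed fewer records than requested. A user-space sketch of that write-then-verify pattern against a simulated device (every name here is illustrative, not mlxsw API):

    #include <errno.h>
    #include <stdio.h>

    struct payload { unsigned char num_rec; };

    /* Simulated device: accepts only one record regardless of the request. */
    static int device_reg_write(struct payload *pl)
    {
            pl->num_rec = 1;
            return 0;
    }

    static int write_and_verify(struct payload *pl)
    {
            unsigned char requested = pl->num_rec;  /* snapshot before writing */
            int err = device_reg_write(pl);         /* device may rewrite field */

            if (err)
                    return err;
            return pl->num_rec == requested ? 0 : -EBUSY;
    }

    int main(void)
    {
            struct payload pl = { .num_rec = 2 };

            printf("%d\n", write_and_verify(&pl));  /* prints -16 (-EBUSY) */
            return 0;
    }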
2580     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2581     index db8a4bcfc6c7..14b646b3b084 100644
2582     --- a/drivers/net/ethernet/ti/cpsw.c
2583     +++ b/drivers/net/ethernet/ti/cpsw.c
2584     @@ -1618,6 +1618,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2585     q_idx = q_idx % cpsw->tx_ch_num;
2586    
2587     txch = cpsw->txv[q_idx].ch;
2588     + txq = netdev_get_tx_queue(ndev, q_idx);
2589     ret = cpsw_tx_packet_submit(priv, skb, txch);
2590     if (unlikely(ret != 0)) {
2591     cpsw_err(priv, tx_err, "desc submit failed\n");
2592     @@ -1628,15 +1629,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2593     * tell the kernel to stop sending us tx frames.
2594     */
2595     if (unlikely(!cpdma_check_free_tx_desc(txch))) {
2596     - txq = netdev_get_tx_queue(ndev, q_idx);
2597     netif_tx_stop_queue(txq);
2598     +
2599     + /* Barrier, so that stop_queue is visible to other CPUs */
2600     + smp_mb__after_atomic();
2601     +
2602     + if (cpdma_check_free_tx_desc(txch))
2603     + netif_tx_wake_queue(txq);
2604     }
2605    
2606     return NETDEV_TX_OK;
2607     fail:
2608     ndev->stats.tx_dropped++;
2609     - txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
2610     netif_tx_stop_queue(txq);
2611     +
2612     + /* Barrier, so that stop_queue is visible to other CPUs */
2613     + smp_mb__after_atomic();
2614     +
2615     + if (cpdma_check_free_tx_desc(txch))
2616     + netif_tx_wake_queue(txq);
2617     +
2618     return NETDEV_TX_BUSY;
2619     }
2620    
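Both cpsw transmit paths now follow the standard stop/recheck/wake idiom: stop the queue, issue a barrier so the stopped state is visible before the ring is re-read, then wake the queue if a racing TX completion already freed descriptors. The idiom in isolation, as a sketch rather than a buildable module; has_free_descs() stands in for cpdma_check_free_tx_desc(), the rest is the real netdev API:

    static bool has_free_descs(void *ch);   /* placeholder for the driver hook */

    static void stop_tx_queue_safely(struct netdev_queue *txq, void *ch)
    {
            netif_tx_stop_queue(txq);

            /* Pairs with the barrier on the completion side: publish the
             * stopped state before re-checking for free descriptors. */
            smp_mb__after_atomic();

            if (has_free_descs(ch))
                    netif_tx_wake_queue(txq);
    }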
2621     diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2622     index 2b1e67bc1e73..3d860de5e342 100644
2623     --- a/drivers/net/phy/phy.c
2624     +++ b/drivers/net/phy/phy.c
2625     @@ -842,7 +842,7 @@ void phy_start(struct phy_device *phydev)
2626     break;
2627     case PHY_HALTED:
2628     /* make sure interrupts are re-enabled for the PHY */
2629     - if (phydev->irq != PHY_POLL) {
2630     + if (phy_interrupt_is_valid(phydev)) {
2631     err = phy_enable_interrupts(phydev);
2632     if (err < 0)
2633     break;
2634     diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
2635     index 8c6b8918ec31..38cd2e8fae23 100644
2636     --- a/drivers/net/ppp/ppp_generic.c
2637     +++ b/drivers/net/ppp/ppp_generic.c
2638     @@ -3158,6 +3158,15 @@ ppp_connect_channel(struct channel *pch, int unit)
2639     goto outl;
2640    
2641     ppp_lock(ppp);
2642     + spin_lock_bh(&pch->downl);
2643     + if (!pch->chan) {
2644     + /* Don't connect unregistered channels */
2645     + spin_unlock_bh(&pch->downl);
2646     + ppp_unlock(ppp);
2647     + ret = -ENOTCONN;
2648     + goto outl;
2649     + }
2650     + spin_unlock_bh(&pch->downl);
2651     if (pch->file.hdrlen > ppp->file.hdrlen)
2652     ppp->file.hdrlen = pch->file.hdrlen;
2653     hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
2654     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2655     index fa51b7b0e9ea..bc38d54e37b9 100644
2656     --- a/drivers/net/tun.c
2657     +++ b/drivers/net/tun.c
2658     @@ -1315,6 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2659     else
2660     *skb_xdp = 0;
2661    
2662     + preempt_disable();
2663     rcu_read_lock();
2664     xdp_prog = rcu_dereference(tun->xdp_prog);
2665     if (xdp_prog && !*skb_xdp) {
2666     @@ -1333,9 +1334,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2667     get_page(alloc_frag->page);
2668     alloc_frag->offset += buflen;
2669     err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
2670     + xdp_do_flush_map();
2671     if (err)
2672     goto err_redirect;
2673     rcu_read_unlock();
2674     + preempt_enable();
2675     return NULL;
2676     case XDP_TX:
2677     xdp_xmit = true;
2678     @@ -1357,6 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2679     skb = build_skb(buf, buflen);
2680     if (!skb) {
2681     rcu_read_unlock();
2682     + preempt_enable();
2683     return ERR_PTR(-ENOMEM);
2684     }
2685    
2686     @@ -1369,10 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2687     skb->dev = tun->dev;
2688     generic_xdp_tx(skb, xdp_prog);
2689     rcu_read_unlock();
2690     + preempt_enable();
2691     return NULL;
2692     }
2693    
2694     rcu_read_unlock();
2695     + preempt_enable();
2696    
2697     return skb;
2698    
2699     @@ -1380,6 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
2700     put_page(alloc_frag->page);
2701     err_xdp:
2702     rcu_read_unlock();
2703     + preempt_enable();
2704     this_cpu_inc(tun->pcpu_stats->rx_dropped);
2705     return NULL;
2706     }
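xdp_do_redirect() queues work in per-CPU state that xdp_do_flush_map() later drains, so the task must stay on one CPU for the whole window; the hunk therefore brackets tun_build_skb()'s XDP section with preempt_disable()/preempt_enable() on every exit path and flushes before leaving. The critical section reduced to its skeleton (a sketch, not buildable code):

    /*
     *      preempt_disable();              // pin the task to this CPU
     *      rcu_read_lock();
     *      ... run the attached XDP program; on XDP_REDIRECT:
     *              xdp_do_redirect(dev, &xdp, xdp_prog);
     *              xdp_do_flush_map();     // drain per-CPU redirect state
     *      rcu_read_unlock();
     *      preempt_enable();               // mirrored on every exit path
     */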
2707     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2708     index 7927e28f5336..6a785595b9b8 100644
2709     --- a/drivers/net/virtio_net.c
2710     +++ b/drivers/net/virtio_net.c
2711     @@ -1995,8 +1995,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2712     }
2713    
2714     /* Make sure NAPI is not using any XDP TX queues for RX. */
2715     - for (i = 0; i < vi->max_queue_pairs; i++)
2716     - napi_disable(&vi->rq[i].napi);
2717     + if (netif_running(dev))
2718     + for (i = 0; i < vi->max_queue_pairs; i++)
2719     + napi_disable(&vi->rq[i].napi);
2720    
2721     netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2722     err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2723     @@ -2015,7 +2016,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2724     }
2725     if (old_prog)
2726     bpf_prog_put(old_prog);
2727     - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2728     + if (netif_running(dev))
2729     + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2730     }
2731    
2732     return 0;
2733     diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
2734     index 0d2e00ece804..f3c1d5245978 100644
2735     --- a/drivers/net/wan/hdlc_ppp.c
2736     +++ b/drivers/net/wan/hdlc_ppp.c
2737     @@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
2738     ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
2739     0, NULL);
2740     proto->restart_counter--;
2741     - } else
2742     + } else if (netif_carrier_ok(proto->dev))
2743     + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
2744     + 0, NULL);
2745     + else
2746     ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
2747     0, NULL);
2748     break;
2749     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
2750     index 33d4431c2b4b..93a082e0bdd4 100644
2751     --- a/drivers/nvme/host/rdma.c
2752     +++ b/drivers/nvme/host/rdma.c
2753     @@ -88,7 +88,6 @@ enum nvme_rdma_queue_flags {
2754    
2755     struct nvme_rdma_queue {
2756     struct nvme_rdma_qe *rsp_ring;
2757     - atomic_t sig_count;
2758     int queue_size;
2759     size_t cmnd_capsule_len;
2760     struct nvme_rdma_ctrl *ctrl;
2761     @@ -521,7 +520,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
2762     queue->cmnd_capsule_len = sizeof(struct nvme_command);
2763    
2764     queue->queue_size = queue_size;
2765     - atomic_set(&queue->sig_count, 0);
2766    
2767     queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
2768     RDMA_PS_TCP, IB_QPT_RC);
2769     @@ -1232,21 +1230,9 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
2770     nvme_end_request(rq, req->status, req->result);
2771     }
2772    
2773     -/*
2774     - * We want to signal completion at least every queue depth/2. This returns the
2775     - * largest power of two that is not above half of (queue size + 1) to optimize
2776     - * (avoid divisions).
2777     - */
2778     -static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
2779     -{
2780     - int limit = 1 << ilog2((queue->queue_size + 1) / 2);
2781     -
2782     - return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
2783     -}
2784     -
2785     static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
2786     struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
2787     - struct ib_send_wr *first, bool flush)
2788     + struct ib_send_wr *first)
2789     {
2790     struct ib_send_wr wr, *bad_wr;
2791     int ret;
2792     @@ -1255,31 +1241,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
2793     sge->length = sizeof(struct nvme_command),
2794     sge->lkey = queue->device->pd->local_dma_lkey;
2795    
2796     - qe->cqe.done = nvme_rdma_send_done;
2797     -
2798     wr.next = NULL;
2799     wr.wr_cqe = &qe->cqe;
2800     wr.sg_list = sge;
2801     wr.num_sge = num_sge;
2802     wr.opcode = IB_WR_SEND;
2803     - wr.send_flags = 0;
2804     -
2805     - /*
2806     - * Unsignalled send completions are another giant desaster in the
2807     - * IB Verbs spec: If we don't regularly post signalled sends
2808     - * the send queue will fill up and only a QP reset will rescue us.
2809     - * Would have been way to obvious to handle this in hardware or
2810     - * at least the RDMA stack..
2811     - *
2812     - * Always signal the flushes. The magic request used for the flush
2813     - * sequencer is not allocated in our driver's tagset and it's
2814     - * triggered to be freed by blk_cleanup_queue(). So we need to
2815     - * always mark it as signaled to ensure that the "wr_cqe", which is
2816     - * embedded in request's payload, is not freed when __ib_process_cq()
2817     - * calls wr_cqe->done().
2818     - */
2819     - if (nvme_rdma_queue_sig_limit(queue) || flush)
2820     - wr.send_flags |= IB_SEND_SIGNALED;
2821     + wr.send_flags = IB_SEND_SIGNALED;
2822    
2823     if (first)
2824     first->next = &wr;
2825     @@ -1329,6 +1296,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
2826     return queue->ctrl->tag_set.tags[queue_idx - 1];
2827     }
2828    
2829     +static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
2830     +{
2831     + if (unlikely(wc->status != IB_WC_SUCCESS))
2832     + nvme_rdma_wr_error(cq, wc, "ASYNC");
2833     +}
2834     +
2835     static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2836     {
2837     struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
2838     @@ -1350,10 +1323,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2839     cmd->common.flags |= NVME_CMD_SGL_METABUF;
2840     nvme_rdma_set_sg_null(cmd);
2841    
2842     + sqe->cqe.done = nvme_rdma_async_done;
2843     +
2844     ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
2845     DMA_TO_DEVICE);
2846    
2847     - ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
2848     + ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
2849     WARN_ON_ONCE(ret);
2850     }
2851    
2852     @@ -1639,7 +1614,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
2853     struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2854     struct nvme_rdma_qe *sqe = &req->sqe;
2855     struct nvme_command *c = sqe->data;
2856     - bool flush = false;
2857     struct ib_device *dev;
2858     blk_status_t ret;
2859     int err;
2860     @@ -1668,13 +1642,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
2861     goto err;
2862     }
2863    
2864     + sqe->cqe.done = nvme_rdma_send_done;
2865     +
2866     ib_dma_sync_single_for_device(dev, sqe->dma,
2867     sizeof(struct nvme_command), DMA_TO_DEVICE);
2868    
2869     - if (req_op(rq) == REQ_OP_FLUSH)
2870     - flush = true;
2871     err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
2872     - req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
2873     + req->mr->need_inval ? &req->reg_wr.wr : NULL);
2874     if (unlikely(err)) {
2875     nvme_rdma_unmap_data(queue, rq);
2876     goto err;
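
The nvme_rdma_queue_sig_limit() helper deleted above implemented periodic completion signaling: with a power-of-two limit, masking an incremented atomic counter selects every limit-th send for IB_SEND_SIGNALED, which the patch replaces with unconditional signaling. A minimal userspace sketch of that counter trick (sig_count/should_signal are illustrative names, not the driver's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the periodic-signal heuristic this hunk removes: with a
 * power-of-two limit, masking the incremented counter marks every
 * limit-th send as signaled. The patch drops this in favour of
 * signaling every send.
 */
static atomic_uint sig_count;

static bool should_signal(unsigned int limit) /* limit: power of two */
{
    return ((atomic_fetch_add(&sig_count, 1) + 1) & (limit - 1)) == 0;
}

int main(void)
{
    for (int i = 1; i <= 8; i++)
        printf("send %d: %s\n", i,
               should_signal(4) ? "signaled" : "unsignaled");
    return 0;
}
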
2877     diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
2878     index cae54f8320be..633e55c57b13 100644
2879     --- a/drivers/pci/pcie/aspm.c
2880     +++ b/drivers/pci/pcie/aspm.c
2881     @@ -803,10 +803,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
2882    
2883     /*
2884     * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
2885     - * hierarchies.
2886     + * hierarchies. Note that some PCIe host implementations omit
2887     + * the root ports entirely, in which case a downstream port on
2888     + * a switch may become the root of the link state chain for all
2889     + * its subordinate endpoints.
2890     */
2891     if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2892     - pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
2893     + pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
2894     + !pdev->bus->parent->self) {
2895     link->root = link;
2896     } else {
2897     struct pcie_link_state *parent;
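
The added !pdev->bus->parent->self test treats a port as the root of a link-state chain when its parent bus was not created by a bridge, i.e. it hangs directly off the host bridge. A rough userspace model (these structs are illustrative, not the kernel's struct pci_dev/pci_bus, and the sketch assumes a device at least one bus level down, as the kernel only evaluates this clause after the root-port checks):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy topology: a bus knows its parent bus and the bridge ("self")
 * that creates it. If the parent bus has no bridge, nothing PCIe
 * exists upstream, so this port roots the chain, which is the case
 * the comment above describes for hosts that omit root ports.
 */
struct bus;
struct dev { struct bus *bus; const char *name; };
struct bus { struct bus *parent; struct dev *self; };

static bool is_link_root(const struct dev *d)
{
    return d->bus->parent->self == NULL;
}

int main(void)
{
    struct bus root_bus = { .parent = NULL, .self = NULL };
    struct bus secondary = { .parent = &root_bus, .self = NULL };
    struct dev dsp = { .bus = &secondary, .name = "switch-downstream" };
    printf("%s is link root: %d\n", dsp.name, is_link_root(&dsp));
    return 0;
}
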
2898     diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2899     index 92dd4aef21a3..6b1e83539a9d 100644
2900     --- a/drivers/s390/net/qeth_core.h
2901     +++ b/drivers/s390/net/qeth_core.h
2902     @@ -580,6 +580,11 @@ struct qeth_cmd_buffer {
2903     void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
2904     };
2905    
2906     +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
2907     +{
2908     + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
2909     +}
2910     +
2911     /**
2912     * definition of a qeth channel, used for read and write
2913     */
2914     @@ -834,7 +839,7 @@ struct qeth_trap_id {
2915     */
2916     static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
2917     {
2918     - return PFN_UP(end - 1) - PFN_DOWN(start);
2919     + return PFN_UP(end) - PFN_DOWN(start);
2920     }
2921    
2922     static inline int qeth_get_micros(void)
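
The one-character change above fixes an undercount: for a half-open range [start, end), PFN_UP(end - 1) misses a range that ends one byte into a fresh page, while PFN_UP(end) is exact but makes the empty start == end case return 1, which is why the patched callers now guard with "if (start != end)". A small userspace demonstration, assuming 4K pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

/* Pages spanned by [start, end): the old formula undercounts when the
 * range ends one byte past a page boundary; the new one is exact but
 * callers must skip empty ranges themselves.
 */
static unsigned long elements_old(unsigned long s, unsigned long e)
{
    return PFN_UP(e - 1) - PFN_DOWN(s);
}

static unsigned long elements_new(unsigned long s, unsigned long e)
{
    return PFN_UP(e) - PFN_DOWN(s);
}

int main(void)
{
    /* [0, 4097) covers bytes 0..4096, i.e. two pages */
    printf("old=%lu new=%lu\n", elements_old(0, 4097),
           elements_new(0, 4097));
    return 0;
}
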
2923     diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2924     index 7c7a244b6684..145b57762d8f 100644
2925     --- a/drivers/s390/net/qeth_core_main.c
2926     +++ b/drivers/s390/net/qeth_core_main.c
2927     @@ -2073,7 +2073,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2928     unsigned long flags;
2929     struct qeth_reply *reply = NULL;
2930     unsigned long timeout, event_timeout;
2931     - struct qeth_ipa_cmd *cmd;
2932     + struct qeth_ipa_cmd *cmd = NULL;
2933    
2934     QETH_CARD_TEXT(card, 2, "sendctl");
2935    
2936     @@ -2087,23 +2087,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2937     }
2938     reply->callback = reply_cb;
2939     reply->param = reply_param;
2940     - if (card->state == CARD_STATE_DOWN)
2941     - reply->seqno = QETH_IDX_COMMAND_SEQNO;
2942     - else
2943     - reply->seqno = card->seqno.ipa++;
2944     +
2945     init_waitqueue_head(&reply->wait_q);
2946     - spin_lock_irqsave(&card->lock, flags);
2947     - list_add_tail(&reply->list, &card->cmd_waiter_list);
2948     - spin_unlock_irqrestore(&card->lock, flags);
2949     QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
2950    
2951     while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
2952     - qeth_prepare_control_data(card, len, iob);
2953    
2954     - if (IS_IPA(iob->data))
2955     + if (IS_IPA(iob->data)) {
2956     + cmd = __ipa_cmd(iob);
2957     + cmd->hdr.seqno = card->seqno.ipa++;
2958     + reply->seqno = cmd->hdr.seqno;
2959     event_timeout = QETH_IPA_TIMEOUT;
2960     - else
2961     + } else {
2962     + reply->seqno = QETH_IDX_COMMAND_SEQNO;
2963     event_timeout = QETH_TIMEOUT;
2964     + }
2965     + qeth_prepare_control_data(card, len, iob);
2966     +
2967     + spin_lock_irqsave(&card->lock, flags);
2968     + list_add_tail(&reply->list, &card->cmd_waiter_list);
2969     + spin_unlock_irqrestore(&card->lock, flags);
2970     +
2971     timeout = jiffies + event_timeout;
2972    
2973     QETH_CARD_TEXT(card, 6, "noirqpnd");
2974     @@ -2128,9 +2132,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2975    
2976     /* we have only one long running ipassist, since we can ensure
2977     process context of this command we can sleep */
2978     - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2979     - if ((cmd->hdr.command == IPA_CMD_SETIP) &&
2980     - (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
2981     + if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
2982     + cmd->hdr.prot_version == QETH_PROT_IPV4) {
2983     if (!wait_event_timeout(reply->wait_q,
2984     atomic_read(&reply->received), event_timeout))
2985     goto time_err;
2986     @@ -2894,7 +2897,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
2987     memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2988     cmd->hdr.command = command;
2989     cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2990     - cmd->hdr.seqno = card->seqno.ipa;
2991     + /* cmd->hdr.seqno is set by qeth_send_control_data() */
2992     cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2993     cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2994     if (card->options.layer2)
2995     @@ -3859,10 +3862,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
2996     int qeth_get_elements_no(struct qeth_card *card,
2997     struct sk_buff *skb, int extra_elems, int data_offset)
2998     {
2999     - int elements = qeth_get_elements_for_range(
3000     - (addr_t)skb->data + data_offset,
3001     - (addr_t)skb->data + skb_headlen(skb)) +
3002     - qeth_get_elements_for_frags(skb);
3003     + addr_t end = (addr_t)skb->data + skb_headlen(skb);
3004     + int elements = qeth_get_elements_for_frags(skb);
3005     + addr_t start = (addr_t)skb->data + data_offset;
3006     +
3007     + if (start != end)
3008     + elements += qeth_get_elements_for_range(start, end);
3009    
3010     if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3011     QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
3012     diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
3013     index e5833837b799..8727b9517de8 100644
3014     --- a/drivers/s390/net/qeth_l3.h
3015     +++ b/drivers/s390/net/qeth_l3.h
3016     @@ -40,8 +40,40 @@ struct qeth_ipaddr {
3017     unsigned int pfxlen;
3018     } a6;
3019     } u;
3020     -
3021     };
3022     +
3023     +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
3024     + struct qeth_ipaddr *a2)
3025     +{
3026     + if (a1->proto != a2->proto)
3027     + return false;
3028     + if (a1->proto == QETH_PROT_IPV6)
3029     + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
3030     + return a1->u.a4.addr == a2->u.a4.addr;
3031     +}
3032     +
3033     +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
3034     + struct qeth_ipaddr *a2)
3035     +{
3036     + /* Assumes that the pair was obtained via qeth_l3_find_addr_by_ip(),
3037     + * so 'proto' and 'addr' match for sure.
3038     + *
3039     + * For ucast:
3040     + * - 'mac' is always 0.
3041     + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
3042     + * values are required to avoid mixups in takeover eligibility.
3043     + *
3044     + * For mcast,
3045     + * - 'mac' is mapped from the IP, and thus always matches.
3046     + * - 'mask'/'pfxlen' is always 0.
3047     + */
3048     + if (a1->type != a2->type)
3049     + return false;
3050     + if (a1->proto == QETH_PROT_IPV6)
3051     + return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
3052     + return a1->u.a4.mask == a2->u.a4.mask;
3053     +}
3054     +
3055     static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
3056     {
3057     u64 ret = 0;
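
The two helpers added above split matching into a cheap by-IP lookup key (proto + address) and a full comparison of the remaining discriminators. A userspace model of that split for IPv4 (field names are illustrative, not the kernel's struct qeth_ipaddr):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two-level match: lookup by address only, then compare type and
 * netmask so entries with the same IP but different attributes are
 * not conflated.
 */
struct ipaddr { uint32_t addr; uint32_t mask; int type; };

static bool match_ip(const struct ipaddr *a, const struct ipaddr *b)
{
    return a->addr == b->addr;
}

static bool match_all(const struct ipaddr *a, const struct ipaddr *b)
{
    return match_ip(a, b) && a->type == b->type && a->mask == b->mask;
}

int main(void)
{
    struct ipaddr host24 = { 0x0a000001, 0xffffff00, 0 };
    struct ipaddr host16 = { 0x0a000001, 0xffff0000, 0 };
    printf("by-ip=%d full=%d\n", match_ip(&host24, &host16),
           match_all(&host24, &host16));
    return 0;
}
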
3058     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
3059     index 36dee176f8e2..96576e729222 100644
3060     --- a/drivers/s390/net/qeth_l3_main.c
3061     +++ b/drivers/s390/net/qeth_l3_main.c
3062     @@ -149,6 +149,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
3063     return -EINVAL;
3064     }
3065    
3066     +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
3067     + struct qeth_ipaddr *query)
3068     +{
3069     + u64 key = qeth_l3_ipaddr_hash(query);
3070     + struct qeth_ipaddr *addr;
3071     +
3072     + if (query->is_multicast) {
3073     + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
3074     + if (qeth_l3_addr_match_ip(addr, query))
3075     + return addr;
3076     + } else {
3077     + hash_for_each_possible(card->ip_htable, addr, hnode, key)
3078     + if (qeth_l3_addr_match_ip(addr, query))
3079     + return addr;
3080     + }
3081     + return NULL;
3082     +}
3083     +
3084     static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
3085     {
3086     int i, j;
3087     @@ -202,34 +220,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
3088     return rc;
3089     }
3090    
3091     -inline int
3092     -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
3093     -{
3094     - return addr1->proto == addr2->proto &&
3095     - !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
3096     - !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
3097     -}
3098     -
3099     -static struct qeth_ipaddr *
3100     -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3101     -{
3102     - struct qeth_ipaddr *addr;
3103     -
3104     - if (tmp_addr->is_multicast) {
3105     - hash_for_each_possible(card->ip_mc_htable, addr,
3106     - hnode, qeth_l3_ipaddr_hash(tmp_addr))
3107     - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
3108     - return addr;
3109     - } else {
3110     - hash_for_each_possible(card->ip_htable, addr,
3111     - hnode, qeth_l3_ipaddr_hash(tmp_addr))
3112     - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
3113     - return addr;
3114     - }
3115     -
3116     - return NULL;
3117     -}
3118     -
3119     int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3120     {
3121     int rc = 0;
3122     @@ -244,23 +234,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3123     QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
3124     }
3125    
3126     - addr = qeth_l3_ip_from_hash(card, tmp_addr);
3127     - if (!addr)
3128     + addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
3129     + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
3130     return -ENOENT;
3131    
3132     addr->ref_counter--;
3133     - if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
3134     - addr->type == QETH_IP_TYPE_RXIP))
3135     + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
3136     return rc;
3137     if (addr->in_progress)
3138     return -EINPROGRESS;
3139    
3140     - if (!qeth_card_hw_is_reachable(card)) {
3141     - addr->disp_flag = QETH_DISP_ADDR_DELETE;
3142     - return 0;
3143     - }
3144     -
3145     - rc = qeth_l3_deregister_addr_entry(card, addr);
3146     + if (qeth_card_hw_is_reachable(card))
3147     + rc = qeth_l3_deregister_addr_entry(card, addr);
3148    
3149     hash_del(&addr->hnode);
3150     kfree(addr);
3151     @@ -272,6 +257,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3152     {
3153     int rc = 0;
3154     struct qeth_ipaddr *addr;
3155     + char buf[40];
3156    
3157     QETH_CARD_TEXT(card, 4, "addip");
3158    
3159     @@ -282,8 +268,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3160     QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
3161     }
3162    
3163     - addr = qeth_l3_ip_from_hash(card, tmp_addr);
3164     - if (!addr) {
3165     + addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
3166     + if (addr) {
3167     + if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
3168     + return -EADDRINUSE;
3169     + if (qeth_l3_addr_match_all(addr, tmp_addr)) {
3170     + addr->ref_counter++;
3171     + return 0;
3172     + }
3173     + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
3174     + buf);
3175     + dev_warn(&card->gdev->dev,
3176     + "Registering IP address %s failed\n", buf);
3177     + return -EADDRINUSE;
3178     + } else {
3179     addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
3180     if (!addr)
3181     return -ENOMEM;
3182     @@ -323,19 +321,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
3183     (rc == IPA_RC_LAN_OFFLINE)) {
3184     addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3185     if (addr->ref_counter < 1) {
3186     - qeth_l3_delete_ip(card, addr);
3187     + qeth_l3_deregister_addr_entry(card, addr);
3188     + hash_del(&addr->hnode);
3189     kfree(addr);
3190     }
3191     } else {
3192     hash_del(&addr->hnode);
3193     kfree(addr);
3194     }
3195     - } else {
3196     - if (addr->type == QETH_IP_TYPE_NORMAL ||
3197     - addr->type == QETH_IP_TYPE_RXIP)
3198     - addr->ref_counter++;
3199     }
3200     -
3201     return rc;
3202     }
3203    
3204     @@ -403,11 +397,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
3205     spin_lock_bh(&card->ip_lock);
3206    
3207     hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
3208     - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
3209     - qeth_l3_deregister_addr_entry(card, addr);
3210     - hash_del(&addr->hnode);
3211     - kfree(addr);
3212     - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
3213     + if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
3214     if (addr->proto == QETH_PROT_IPV4) {
3215     addr->in_progress = 1;
3216     spin_unlock_bh(&card->ip_lock);
3217     @@ -723,12 +713,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
3218     return -ENOMEM;
3219    
3220     spin_lock_bh(&card->ip_lock);
3221     -
3222     - if (qeth_l3_ip_from_hash(card, ipaddr))
3223     - rc = -EEXIST;
3224     - else
3225     - qeth_l3_add_ip(card, ipaddr);
3226     -
3227     + rc = qeth_l3_add_ip(card, ipaddr);
3228     spin_unlock_bh(&card->ip_lock);
3229    
3230     kfree(ipaddr);
3231     @@ -791,12 +776,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
3232     return -ENOMEM;
3233    
3234     spin_lock_bh(&card->ip_lock);
3235     -
3236     - if (qeth_l3_ip_from_hash(card, ipaddr))
3237     - rc = -EEXIST;
3238     - else
3239     - qeth_l3_add_ip(card, ipaddr);
3240     -
3241     + rc = qeth_l3_add_ip(card, ipaddr);
3242     spin_unlock_bh(&card->ip_lock);
3243    
3244     kfree(ipaddr);
3245     @@ -1404,8 +1384,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
3246     memcpy(tmp->mac, buf, sizeof(tmp->mac));
3247     tmp->is_multicast = 1;
3248    
3249     - ipm = qeth_l3_ip_from_hash(card, tmp);
3250     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
3251     if (ipm) {
3252     + /* for mcast, by-IP match means full match */
3253     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3254     } else {
3255     ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3256     @@ -1488,8 +1469,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
3257     sizeof(struct in6_addr));
3258     tmp->is_multicast = 1;
3259    
3260     - ipm = qeth_l3_ip_from_hash(card, tmp);
3261     + ipm = qeth_l3_find_addr_by_ip(card, tmp);
3262     if (ipm) {
3263     + /* for mcast, by-IP match means full match */
3264     ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
3265     continue;
3266     }
3267     @@ -2633,11 +2615,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
3268     static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
3269     struct sk_buff *skb, int extra_elems)
3270     {
3271     - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
3272     - int elements = qeth_get_elements_for_range(
3273     - tcpdptr,
3274     - (addr_t)skb->data + skb_headlen(skb)) +
3275     - qeth_get_elements_for_frags(skb);
3276     + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
3277     + addr_t end = (addr_t)skb->data + skb_headlen(skb);
3278     + int elements = qeth_get_elements_for_frags(skb);
3279     +
3280     + if (start != end)
3281     + elements += qeth_get_elements_for_range(start, end);
3282    
3283     if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3284     QETH_DBF_MESSAGE(2,
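
The reworked add path above first finds an entry by IP, then decides between taking a reference (full match on a NORMAL address) and rejecting with -EADDRINUSE. A compact userspace model of that decision tree (names and the single-slot "table" are illustrative):

#include <stdio.h>

#define EADDRINUSE 98

/* Decision tree of the patched add path: a by-IP hit either bumps a
 * refcount (full match, NORMAL type) or rejects the registration.
 */
enum { TYPE_NORMAL, TYPE_VIPA, TYPE_RXIP };

struct entry { int present, type, mask, refcnt; };

static int add_ip(struct entry *hit, int new_type, int new_mask)
{
    if (!hit->present) {            /* no by-IP match: insert */
        hit->present = 1;
        hit->type = new_type;
        hit->mask = new_mask;
        hit->refcnt = 1;
        return 0;
    }
    if (new_type != TYPE_NORMAL)    /* VIPA/RXIP may not shadow */
        return -EADDRINUSE;
    if (hit->type == new_type && hit->mask == new_mask) {
        hit->refcnt++;              /* full match: take a reference */
        return 0;
    }
    return -EADDRINUSE;             /* same IP, conflicting attributes */
}

int main(void)
{
    struct entry e = { 0 };
    printf("%d %d %d\n", add_ip(&e, TYPE_NORMAL, 24),
           add_ip(&e, TYPE_NORMAL, 24), add_ip(&e, TYPE_NORMAL, 16));
    return 0;
}
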
3285     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
3286     index 92155cce926d..fb4e6a7ee521 100644
3287     --- a/drivers/vfio/vfio_iommu_type1.c
3288     +++ b/drivers/vfio/vfio_iommu_type1.c
3289     @@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3290     {
3291     struct page *page[1];
3292     struct vm_area_struct *vma;
3293     + struct vm_area_struct *vmas[1];
3294     int ret;
3295    
3296     if (mm == current->mm) {
3297     - ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
3298     - page);
3299     + ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
3300     + page, vmas);
3301     } else {
3302     unsigned int flags = 0;
3303    
3304     @@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
3305    
3306     down_read(&mm->mmap_sem);
3307     ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
3308     - NULL, NULL);
3309     + vmas, NULL);
3310     + /*
3311     + * The lifetime of a vaddr_get_pfn() page pin is
3312     + * userspace-controlled. In the fs-dax case this could
3313     + * lead to indefinite stalls in filesystem operations.
3314     + * Disallow attempts to pin fs-dax pages via this
3315     + * interface.
3316     + */
3317     + if (ret > 0 && vma_is_fsdax(vmas[0])) {
3318     + ret = -EOPNOTSUPP;
3319     + put_page(page[0]);
3320     + }
3321     up_read(&mm->mmap_sem);
3322     }
3323    
3324     diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
3325     index 883881b16c86..4447e0fe9b55 100644
3326     --- a/fs/btrfs/sysfs.c
3327     +++ b/fs/btrfs/sysfs.c
3328     @@ -422,7 +422,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
3329     {
3330     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3331    
3332     - return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
3333     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
3334     }
3335    
3336     BTRFS_ATTR(nodesize, btrfs_nodesize_show);
3337     @@ -432,8 +432,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
3338     {
3339     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3340    
3341     - return snprintf(buf, PAGE_SIZE, "%u\n",
3342     - fs_info->super_copy->sectorsize);
3343     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
3344     }
3345    
3346     BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
3347     @@ -443,8 +442,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
3348     {
3349     struct btrfs_fs_info *fs_info = to_fs_info(kobj);
3350    
3351     - return snprintf(buf, PAGE_SIZE, "%u\n",
3352     - fs_info->super_copy->sectorsize);
3353     + return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
3354     }
3355    
3356     BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
3357     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
3358     index f615d59b0489..46bda13e5727 100644
3359     --- a/fs/btrfs/transaction.c
3360     +++ b/fs/btrfs/transaction.c
3361     @@ -1722,19 +1722,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
3362    
3363     super = fs_info->super_copy;
3364    
3365     + /* update latest btrfs_super_block::chunk_root refs */
3366     root_item = &fs_info->chunk_root->root_item;
3367     - super->chunk_root = root_item->bytenr;
3368     - super->chunk_root_generation = root_item->generation;
3369     - super->chunk_root_level = root_item->level;
3370     + btrfs_set_super_chunk_root(super, root_item->bytenr);
3371     + btrfs_set_super_chunk_root_generation(super, root_item->generation);
3372     + btrfs_set_super_chunk_root_level(super, root_item->level);
3373    
3374     + /* update latest btrfs_super_block::root refs */
3375     root_item = &fs_info->tree_root->root_item;
3376     - super->root = root_item->bytenr;
3377     - super->generation = root_item->generation;
3378     - super->root_level = root_item->level;
3379     + btrfs_set_super_root(super, root_item->bytenr);
3380     + btrfs_set_super_generation(super, root_item->generation);
3381     + btrfs_set_super_root_level(super, root_item->level);
3382     +
3383     if (btrfs_test_opt(fs_info, SPACE_CACHE))
3384     - super->cache_generation = root_item->generation;
3385     + btrfs_set_super_cache_generation(super, root_item->generation);
3386     if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
3387     - super->uuid_tree_generation = root_item->generation;
3388     + btrfs_set_super_uuid_tree_generation(super,
3389     + root_item->generation);
3390     }
3391    
3392     int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
3393     diff --git a/fs/direct-io.c b/fs/direct-io.c
3394     index b53e66d9abd7..625a84aa6484 100644
3395     --- a/fs/direct-io.c
3396     +++ b/fs/direct-io.c
3397     @@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
3398     */
3399     if (dio->is_async && iov_iter_rw(iter) == WRITE) {
3400     retval = 0;
3401     - if ((iocb->ki_filp->f_flags & O_DSYNC) ||
3402     - IS_SYNC(iocb->ki_filp->f_mapping->host))
3403     + if (iocb->ki_flags & IOCB_DSYNC)
3404     retval = dio_set_defer_completion(dio);
3405     else if (!dio->inode->i_sb->s_dio_done_wq) {
3406     /*
3407     diff --git a/include/linux/fs.h b/include/linux/fs.h
3408     index 440281f8564d..d54f41a63dbf 100644
3409     --- a/include/linux/fs.h
3410     +++ b/include/linux/fs.h
3411     @@ -3185,7 +3185,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
3412     if (!vma_is_dax(vma))
3413     return false;
3414     inode = file_inode(vma->vm_file);
3415     - if (inode->i_mode == S_IFCHR)
3416     + if (S_ISCHR(inode->i_mode))
3417     return false; /* device-dax */
3418     return true;
3419     }
3420     diff --git a/include/linux/nospec.h b/include/linux/nospec.h
3421     index fbc98e2c8228..132e3f5a2e0d 100644
3422     --- a/include/linux/nospec.h
3423     +++ b/include/linux/nospec.h
3424     @@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
3425     BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
3426     BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
3427     \
3428     - _i &= _mask; \
3429     - _i; \
3430     + (typeof(_i)) (_i & _mask); \
3431     })
3432     #endif /* _LINUX_NOSPEC_H */
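
The macro builds an all-ones or all-zeroes mask without a branch and applies it to the index, so a mispredicted bounds check cannot speculatively read out of bounds; this hunk additionally casts the result back to the index's type. A simplified userspace rendition (the kernel's mask generation is an arch-specific, branch-free construction; this version only illustrates the masking idea):

#include <stddef.h>
#include <stdio.h>

/* All ones when index < size, zero otherwise: an out-of-range index
 * masks to 0 instead of reaching arbitrary memory under speculation.
 */
static size_t index_mask(size_t index, size_t size)
{
    return ~(size_t)0 + (size_t)(index >= size);
}

static size_t array_index_nospec_demo(size_t index, size_t size)
{
    return index & index_mask(index, size);
}

int main(void)
{
    int table[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    size_t idx = 12; /* out of range: clamps to slot 0 */
    printf("%d\n", table[array_index_nospec_demo(idx, 8)]);
    return 0;
}
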
3433     diff --git a/include/net/udplite.h b/include/net/udplite.h
3434     index 81bdbf97319b..9185e45b997f 100644
3435     --- a/include/net/udplite.h
3436     +++ b/include/net/udplite.h
3437     @@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
3438     UDP_SKB_CB(skb)->cscov = cscov;
3439     if (skb->ip_summed == CHECKSUM_COMPLETE)
3440     skb->ip_summed = CHECKSUM_NONE;
3441     + skb->csum_valid = 0;
3442     }
3443    
3444     return 0;
3445     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3446     index db5e6daadd94..9fe525f410bf 100644
3447     --- a/kernel/time/timer.c
3448     +++ b/kernel/time/timer.c
3449     @@ -1834,6 +1834,12 @@ int timers_dead_cpu(unsigned int cpu)
3450     raw_spin_lock_irq(&new_base->lock);
3451     raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
3452    
3453     + /*
3454     + * The current CPU's base clock might be stale. Update it
3455     + * before moving the timers over.
3456     + */
3457     + forward_timer_base(new_base);
3458     +
3459     BUG_ON(old_base->running_timer);
3460    
3461     for (i = 0; i < WHEEL_SIZE; i++)
3462     diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
3463     index 5d5d413a6cf8..a097a8613a02 100644
3464     --- a/net/bridge/br_sysfs_if.c
3465     +++ b/net/bridge/br_sysfs_if.c
3466     @@ -235,6 +235,9 @@ static ssize_t brport_show(struct kobject *kobj,
3467     struct brport_attribute *brport_attr = to_brport_attr(attr);
3468     struct net_bridge_port *p = to_brport(kobj);
3469    
3470     + if (!brport_attr->show)
3471     + return -EINVAL;
3472     +
3473     return brport_attr->show(p, buf);
3474     }
3475    
3476     diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
3477     index 233a30040c91..9b8a53568b0f 100644
3478     --- a/net/bridge/br_vlan.c
3479     +++ b/net/bridge/br_vlan.c
3480     @@ -157,6 +157,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
3481     masterv = br_vlan_find(vg, vid);
3482     if (WARN_ON(!masterv))
3483     return NULL;
3484     + refcount_set(&masterv->refcnt, 1);
3485     + return masterv;
3486     }
3487     refcount_inc(&masterv->refcnt);
3488    
3489     diff --git a/net/core/dev.c b/net/core/dev.c
3490     index d33bbed640b1..c75ef9d8105a 100644
3491     --- a/net/core/dev.c
3492     +++ b/net/core/dev.c
3493     @@ -2343,8 +2343,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
3494     */
3495     int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3496     {
3497     + bool disabling;
3498     int rc;
3499    
3500     + disabling = txq < dev->real_num_tx_queues;
3501     +
3502     if (txq < 1 || txq > dev->num_tx_queues)
3503     return -EINVAL;
3504    
3505     @@ -2360,15 +2363,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3506     if (dev->num_tc)
3507     netif_setup_tc(dev, txq);
3508    
3509     - if (txq < dev->real_num_tx_queues) {
3510     + dev->real_num_tx_queues = txq;
3511     +
3512     + if (disabling) {
3513     + synchronize_net();
3514     qdisc_reset_all_tx_gt(dev, txq);
3515     #ifdef CONFIG_XPS
3516     netif_reset_xps_queues_gt(dev, txq);
3517     #endif
3518     }
3519     + } else {
3520     + dev->real_num_tx_queues = txq;
3521     }
3522    
3523     - dev->real_num_tx_queues = txq;
3524     return 0;
3525     }
3526     EXPORT_SYMBOL(netif_set_real_num_tx_queues);
3527     diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
3528     index 00ecec4891f3..7f980bd7426e 100644
3529     --- a/net/core/gen_estimator.c
3530     +++ b/net/core/gen_estimator.c
3531     @@ -66,6 +66,7 @@ struct net_rate_estimator {
3532     static void est_fetch_counters(struct net_rate_estimator *e,
3533     struct gnet_stats_basic_packed *b)
3534     {
3535     + memset(b, 0, sizeof(*b));
3536     if (e->stats_lock)
3537     spin_lock(e->stats_lock);
3538    
3539     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
3540     index aff3751df950..1ee6c0d8dde4 100644
3541     --- a/net/ipv4/fib_semantics.c
3542     +++ b/net/ipv4/fib_semantics.c
3543     @@ -654,6 +654,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
3544     fi->fib_nh, cfg, extack))
3545     return 1;
3546     }
3547     +#ifdef CONFIG_IP_ROUTE_CLASSID
3548     + if (cfg->fc_flow &&
3549     + cfg->fc_flow != fi->fib_nh->nh_tclassid)
3550     + return 1;
3551     +#endif
3552     if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
3553     (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
3554     return 0;
3555     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3556     index 0ba88efca7ad..9ff06c5051ae 100644
3557     --- a/net/ipv4/route.c
3558     +++ b/net/ipv4/route.c
3559     @@ -128,10 +128,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
3560     static int ip_rt_error_cost __read_mostly = HZ;
3561     static int ip_rt_error_burst __read_mostly = 5 * HZ;
3562     static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
3563     -static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3564     +static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3565     static int ip_rt_min_advmss __read_mostly = 256;
3566    
3567     static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
3568     +
3569     +static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3570     +
3571     /*
3572     * Interface to generic destination cache.
3573     */
3574     @@ -1829,6 +1832,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
3575     return skb_get_hash_raw(skb) >> 1;
3576     memset(&hash_keys, 0, sizeof(hash_keys));
3577     skb_flow_dissect_flow_keys(skb, &keys, flag);
3578     +
3579     + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
3580     hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
3581     hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
3582     hash_keys.ports.src = keys.ports.src;
3583     @@ -2934,7 +2939,8 @@ static struct ctl_table ipv4_route_table[] = {
3584     .data = &ip_rt_min_pmtu,
3585     .maxlen = sizeof(int),
3586     .mode = 0644,
3587     - .proc_handler = proc_dointvec,
3588     + .proc_handler = proc_dointvec_minmax,
3589     + .extra1 = &ip_min_valid_pmtu,
3590     },
3591     {
3592     .procname = "min_adv_mss",
3593     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3594     index d9d215e27b8a..14474acea0bb 100644
3595     --- a/net/ipv4/tcp_input.c
3596     +++ b/net/ipv4/tcp_input.c
3597     @@ -2013,11 +2013,6 @@ void tcp_enter_loss(struct sock *sk)
3598     /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
3599     * loss recovery is underway except recurring timeout(s) on
3600     * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
3601     - *
3602     - * In theory F-RTO can be used repeatedly during loss recovery.
3603     - * In practice this interacts badly with broken middle-boxes that
3604     - * falsely raise the receive window, which results in repeated
3605     - * timeouts and stop-and-go behavior.
3606     */
3607     tp->frto = sysctl_tcp_frto &&
3608     (new_recovery || icsk->icsk_retransmits) &&
3609     @@ -2699,18 +2694,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
3610     tcp_try_undo_loss(sk, false))
3611     return;
3612    
3613     - /* The ACK (s)acks some never-retransmitted data meaning not all
3614     - * the data packets before the timeout were lost. Therefore we
3615     - * undo the congestion window and state. This is essentially
3616     - * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
3617     - * a retransmitted skb is permantly marked, we can apply such an
3618     - * operation even if F-RTO was not used.
3619     - */
3620     - if ((flag & FLAG_ORIG_SACK_ACKED) &&
3621     - tcp_try_undo_loss(sk, tp->undo_marker))
3622     - return;
3623     -
3624     if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
3625     + /* Step 3.b. A timeout is spurious if not all data are
3626     + * lost, i.e., never-retransmitted data are (s)acked.
3627     + */
3628     + if ((flag & FLAG_ORIG_SACK_ACKED) &&
3629     + tcp_try_undo_loss(sk, true))
3630     + return;
3631     +
3632     if (after(tp->snd_nxt, tp->high_seq)) {
3633     if (flag & FLAG_DATA_SACKED || is_dupack)
3634     tp->frto = 0; /* Step 3.a. loss was real */
3635     @@ -4020,6 +4011,7 @@ void tcp_reset(struct sock *sk)
3636     /* This barrier is coupled with smp_rmb() in tcp_poll() */
3637     smp_wmb();
3638    
3639     + tcp_write_queue_purge(sk);
3640     tcp_done(sk);
3641    
3642     if (!sock_flag(sk, SOCK_DEAD))
3643     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3644     index cd3d60bb7cc8..83d11cd2eb65 100644
3645     --- a/net/ipv4/tcp_output.c
3646     +++ b/net/ipv4/tcp_output.c
3647     @@ -1681,7 +1681,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
3648     */
3649     segs = max_t(u32, bytes / mss_now, min_tso_segs);
3650    
3651     - return min_t(u32, segs, sk->sk_gso_max_segs);
3652     + return segs;
3653     }
3654     EXPORT_SYMBOL(tcp_tso_autosize);
3655    
3656     @@ -1693,8 +1693,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
3657     const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
3658     u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
3659    
3660     - return tso_segs ? :
3661     - tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
3662     + if (!tso_segs)
3663     + tso_segs = tcp_tso_autosize(sk, mss_now,
3664     + sysctl_tcp_min_tso_segs);
3665     + return min_t(u32, tso_segs, sk->sk_gso_max_segs);
3666     }
3667    
3668     /* Returns the portion of skb which can be sent right away */
3669     @@ -1973,6 +1975,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
3670     }
3671     }
3672    
3673     +static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
3674     +{
3675     + struct sk_buff *skb, *next;
3676     +
3677     + skb = tcp_send_head(sk);
3678     + tcp_for_write_queue_from_safe(skb, next, sk) {
3679     + if (len <= skb->len)
3680     + break;
3681     +
3682     + if (unlikely(TCP_SKB_CB(skb)->eor))
3683     + return false;
3684     +
3685     + len -= skb->len;
3686     + }
3687     +
3688     + return true;
3689     +}
3690     +
3691     /* Create a new MTU probe if we are ready.
3692     * MTU probe is regularly attempting to increase the path MTU by
3693     * deliberately sending larger packets. This discovers routing
3694     @@ -2045,6 +2065,9 @@ static int tcp_mtu_probe(struct sock *sk)
3695     return 0;
3696     }
3697    
3698     + if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
3699     + return -1;
3700     +
3701     /* We're allowed to probe. Build it now. */
3702     nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
3703     if (!nskb)
3704     @@ -2080,6 +2103,10 @@ static int tcp_mtu_probe(struct sock *sk)
3705     /* We've eaten all the data from this skb.
3706     * Throw it away. */
3707     TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
3708     + /* If this is the last SKB we copy and eor is set,
3709     + * we need to propagate it to the new skb.
3710     + */
3711     + TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
3712     tcp_unlink_write_queue(skb, sk);
3713     sk_wmem_free_skb(sk, skb);
3714     } else {
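
The tcp_tso_segs() rework above moves the sk_gso_max_segs clamp so it also bounds a congestion control's tso_segs_goal() override, not just the autosized default. A simplified sketch of the ordering (the arithmetic is reduced; names mirror the kernel's but this is not its implementation):

#include <stdio.h>

/* Clamp last: whatever produced the segment goal, the device limit
 * gso_max_segs must bound the final value.
 */
static unsigned int tso_autosize(unsigned int bytes, unsigned int mss,
                                 unsigned int min_segs)
{
    unsigned int segs = bytes / mss;
    return segs > min_segs ? segs : min_segs;
}

static unsigned int tso_segs(unsigned int ca_goal, unsigned int bytes,
                             unsigned int mss, unsigned int gso_max_segs)
{
    unsigned int segs = ca_goal ? ca_goal : tso_autosize(bytes, mss, 2);
    return segs < gso_max_segs ? segs : gso_max_segs;
}

int main(void)
{
    /* a congestion-control goal of 64 still clamps to the device's 32 */
    printf("%u\n", tso_segs(64, 1000000, 1448, 32));
    return 0;
}
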
3715     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3716     index ebfbccae62fd..c79fa6f6b758 100644
3717     --- a/net/ipv4/udp.c
3718     +++ b/net/ipv4/udp.c
3719     @@ -2032,6 +2032,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
3720     err = udplite_checksum_init(skb, uh);
3721     if (err)
3722     return err;
3723     +
3724     + if (UDP_SKB_CB(skb)->partial_cov) {
3725     + skb->csum = inet_compute_pseudo(skb, proto);
3726     + return 0;
3727     + }
3728     }
3729    
3730     /* Note, we are only interested in != 0 or == 0, thus the
3731     diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
3732     index ec43d18b5ff9..547515e8450a 100644
3733     --- a/net/ipv6/ip6_checksum.c
3734     +++ b/net/ipv6/ip6_checksum.c
3735     @@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
3736     err = udplite_checksum_init(skb, uh);
3737     if (err)
3738     return err;
3739     +
3740     + if (UDP_SKB_CB(skb)->partial_cov) {
3741     + skb->csum = ip6_compute_pseudo(skb, proto);
3742     + return 0;
3743     + }
3744     }
3745    
3746     /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
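
For a partial-coverage UDP-Lite packet, the checksum spans the pseudo-header plus only the covered bytes, so a hardware checksum computed over the whole packet cannot be reused; the two hunks above therefore seed skb->csum with just the pseudo-header sum and fall back to software verification. A toy one's-complement sum over a raw buffer shows why coverage changes the result (this simplification ignores the pseudo-header and UDP-Lite header that real coverage includes):

#include <stdint.h>
#include <stdio.h>

/* RFC 1071 style one's-complement checksum over len bytes. */
static uint16_t csum16(const uint8_t *buf, size_t len, uint32_t sum)
{
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    /* same packet, different coverage, different checksum */
    printf("full=%04x partial=%04x\n",
           csum16(payload, 8, 0), csum16(payload, 4, 0));
    return 0;
}
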
3747     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3748     index e79854cc5790..cac815cc8600 100644
3749     --- a/net/ipv6/sit.c
3750     +++ b/net/ipv6/sit.c
3751     @@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
3752     #ifdef CONFIG_IPV6_SIT_6RD
3753     struct ip_tunnel *t = netdev_priv(dev);
3754    
3755     - if (t->dev == sitn->fb_tunnel_dev) {
3756     + if (dev == sitn->fb_tunnel_dev) {
3757     ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
3758     t->ip6rd.relay_prefix = 0;
3759     t->ip6rd.prefixlen = 16;
3760     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3761     index 533fd0503ba0..9219bc134109 100644
3762     --- a/net/netlink/af_netlink.c
3763     +++ b/net/netlink/af_netlink.c
3764     @@ -2276,7 +2276,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
3765     if (cb->start) {
3766     ret = cb->start(cb);
3767     if (ret)
3768     - goto error_unlock;
3769     + goto error_put;
3770     }
3771    
3772     nlk->cb_running = true;
3773     @@ -2296,6 +2296,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
3774     */
3775     return -EINTR;
3776    
3777     +error_put:
3778     + module_put(control->module);
3779     error_unlock:
3780     sock_put(sk);
3781     mutex_unlock(nlk->cb_mutex);
3782     diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
3783     index d444daf1ac04..6f02499ef007 100644
3784     --- a/net/netlink/genetlink.c
3785     +++ b/net/netlink/genetlink.c
3786     @@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
3787     {
3788     struct sk_buff *tmp;
3789     struct net *net, *prev = NULL;
3790     + bool delivered = false;
3791     int err;
3792    
3793     for_each_net_rcu(net) {
3794     @@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
3795     }
3796     err = nlmsg_multicast(prev->genl_sock, tmp,
3797     portid, group, flags);
3798     - if (err)
3799     + if (!err)
3800     + delivered = true;
3801     + else if (err != -ESRCH)
3802     goto error;
3803     }
3804    
3805     prev = net;
3806     }
3807    
3808     - return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
3809     + err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
3810     + if (!err)
3811     + delivered = true;
3812     + else if (err != -ESRCH)
3813     + goto error;
3814     + return delivered ? 0 : -ESRCH;
3815     error:
3816     kfree_skb(skb);
3817     return err;
3818     diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
3819     index 71e6f713fbe7..5b67cb5d47f0 100644
3820     --- a/net/rxrpc/output.c
3821     +++ b/net/rxrpc/output.c
3822     @@ -395,7 +395,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
3823     (char *)&opt, sizeof(opt));
3824     if (ret == 0) {
3825     ret = kernel_sendmsg(conn->params.local->socket, &msg,
3826     - iov, 1, iov[0].iov_len);
3827     + iov, 2, len);
3828    
3829     opt = IPV6_PMTUDISC_DO;
3830     kernel_setsockopt(conn->params.local->socket,
3831     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
3832     index 934c239cf98d..c2fab4bcb8be 100644
3833     --- a/net/sched/cls_api.c
3834     +++ b/net/sched/cls_api.c
3835     @@ -871,13 +871,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
3836     if (tca[TCA_CHAIN] &&
3837     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3838     continue;
3839     - if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
3840     + if (!tcf_chain_dump(chain, skb, cb, index_start, &index)) {
3841     + err = -EMSGSIZE;
3842     break;
3843     + }
3844     }
3845    
3846     cb->args[0] = index;
3847    
3848     out:
3849     + /* If we made no progress, the error (EMSGSIZE) is real */
3850     + if (skb->len == 0 && err)
3851     + return err;
3852     return skb->len;
3853     }
3854    
3855     diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
3856     index b58eccb21f03..ba37d8f57e68 100644
3857     --- a/net/sched/cls_u32.c
3858     +++ b/net/sched/cls_u32.c
3859     @@ -398,10 +398,12 @@ static int u32_init(struct tcf_proto *tp)
3860     static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
3861     bool free_pf)
3862     {
3863     + struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
3864     +
3865     tcf_exts_destroy(&n->exts);
3866     tcf_exts_put_net(&n->exts);
3867     - if (n->ht_down)
3868     - n->ht_down->refcnt--;
3869     + if (ht && --ht->refcnt == 0)
3870     + kfree(ht);
3871     #ifdef CONFIG_CLS_U32_PERF
3872     if (free_pf)
3873     free_percpu(n->pf);
3874     @@ -649,16 +651,15 @@ static void u32_destroy(struct tcf_proto *tp)
3875    
3876     hlist_del(&tp_c->hnode);
3877    
3878     - for (ht = rtnl_dereference(tp_c->hlist);
3879     - ht;
3880     - ht = rtnl_dereference(ht->next)) {
3881     - ht->refcnt--;
3882     - u32_clear_hnode(tp, ht);
3883     - }
3884     -
3885     while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
3886     + u32_clear_hnode(tp, ht);
3887     RCU_INIT_POINTER(tp_c->hlist, ht->next);
3888     - kfree_rcu(ht, rcu);
3889     +
3890     + /* u32_destroy_key() will later free ht for us, if it's
3891     + * still referenced by some knode
3892     + */
3893     + if (--ht->refcnt == 0)
3894     + kfree_rcu(ht, rcu);
3895     }
3896    
3897     kfree(tp_c);
3898     @@ -927,7 +928,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
3899     if (TC_U32_KEY(n->handle) == 0)
3900     return -EINVAL;
3901    
3902     - if (n->flags != flags)
3903     + if ((n->flags ^ flags) &
3904     + ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW))
3905     return -EINVAL;
3906    
3907     new = u32_init_knode(tp, n);
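
The u32 hunks above convert bare refcnt decrements into a proper "put": whichever path drops the last reference frees the hnode, instead of teardown and knode destruction each assuming the other will. A minimal non-RCU model of that pattern:

#include <stdio.h>
#include <stdlib.h>

/* Put-style release: free exactly once, on the last reference drop. */
struct hnode { int refcnt; };

static void hnode_put(struct hnode *ht)
{
    if (ht && --ht->refcnt == 0) {
        free(ht);
        printf("freed\n");
    }
}

int main(void)
{
    struct hnode *ht = malloc(sizeof(*ht));
    ht->refcnt = 2;    /* one ref from the table, one from a knode */
    hnode_put(ht);     /* table teardown drops its reference */
    hnode_put(ht);     /* last knode destruction frees the node */
    return 0;
}
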
3908     diff --git a/net/sctp/input.c b/net/sctp/input.c
3909     index 141c9c466ec1..0247cc432e02 100644
3910     --- a/net/sctp/input.c
3911     +++ b/net/sctp/input.c
3912     @@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
3913     rhl_for_each_entry_rcu(transport, tmp, list, node)
3914     if (transport->asoc->ep == t->asoc->ep) {
3915     rcu_read_unlock();
3916     - err = -EEXIST;
3917     - goto out;
3918     + return -EEXIST;
3919     }
3920     rcu_read_unlock();
3921    
3922     err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
3923     &t->node, sctp_hash_params);
3924     -
3925     -out:
3926     if (err)
3927     pr_err_once("insert transport fail, errno %d\n", err);
3928    
3929     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3930     index 3b18085e3b10..f27a9718554c 100644
3931     --- a/net/sctp/ipv6.c
3932     +++ b/net/sctp/ipv6.c
3933     @@ -326,8 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
3934     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
3935     bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
3936    
3937     - if (!IS_ERR(bdst) &&
3938     - ipv6_chk_addr(dev_net(bdst->dev),
3939     + if (IS_ERR(bdst))
3940     + continue;
3941     +
3942     + if (ipv6_chk_addr(dev_net(bdst->dev),
3943     &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
3944     if (!IS_ERR_OR_NULL(dst))
3945     dst_release(dst);
3946     @@ -336,8 +338,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
3947     }
3948    
3949     bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
3950     - if (matchlen > bmatchlen)
3951     + if (matchlen > bmatchlen) {
3952     + dst_release(bdst);
3953     continue;
3954     + }
3955    
3956     if (!IS_ERR_OR_NULL(dst))
3957     dst_release(dst);
3958     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3959     index fcd80feb293f..df22a9c352ad 100644
3960     --- a/net/sctp/protocol.c
3961     +++ b/net/sctp/protocol.c
3962     @@ -514,22 +514,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
3963     if (IS_ERR(rt))
3964     continue;
3965    
3966     - if (!dst)
3967     - dst = &rt->dst;
3968     -
3969     /* Ensure the src address belongs to the output
3970     * interface.
3971     */
3972     odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
3973     false);
3974     if (!odev || odev->ifindex != fl4->flowi4_oif) {
3975     - if (&rt->dst != dst)
3976     + if (!dst)
3977     + dst = &rt->dst;
3978     + else
3979     dst_release(&rt->dst);
3980     continue;
3981     }
3982    
3983     - if (dst != &rt->dst)
3984     - dst_release(dst);
3985     + dst_release(dst);
3986     dst = &rt->dst;
3987     break;
3988     }
3989     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3990     index 514465b03829..e4a400f88168 100644
3991     --- a/net/sctp/sm_make_chunk.c
3992     +++ b/net/sctp/sm_make_chunk.c
3993     @@ -1378,9 +1378,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
3994     struct sctp_chunk *retval;
3995     struct sk_buff *skb;
3996     struct sock *sk;
3997     + int chunklen;
3998     +
3999     + chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
4000     + if (chunklen > SCTP_MAX_CHUNK_LEN)
4001     + goto nodata;
4002    
4003     /* No need to allocate LL here, as this is only a chunk. */
4004     - skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
4005     + skb = alloc_skb(chunklen, gfp);
4006     if (!skb)
4007     goto nodata;
4008    
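
The guard added above rounds the header-plus-payload length up to a 4-byte boundary first and rejects anything beyond the protocol maximum before touching the allocator. A small sketch of that check (65532 stands in for SCTP_MAX_CHUNK_LEN here; the real constant is derived from the 16-bit chunk length field in the sctp headers):

#include <stdio.h>

#define SCTP_PAD4(x) (((x) + 3U) & ~3U)

/* Validate the padded chunk length before allocating for it. */
static int chunk_len_ok(unsigned int hdrlen, unsigned int paylen)
{
    unsigned int chunklen = SCTP_PAD4(hdrlen + paylen);
    return chunklen <= 65532;
}

int main(void)
{
    printf("%d %d\n", chunk_len_ok(4, 1000), chunk_len_ok(4, 70000));
    return 0;
}
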
4009     diff --git a/sound/core/control.c b/sound/core/control.c
4010     index 56b3e2d49c82..af7e6165e21e 100644
4011     --- a/sound/core/control.c
4012     +++ b/sound/core/control.c
4013     @@ -888,7 +888,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
4014    
4015     index_offset = snd_ctl_get_ioff(kctl, &control->id);
4016     vd = &kctl->vd[index_offset];
4017     - if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
4018     + if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
4019     return -EPERM;
4020    
4021     snd_ctl_build_ioff(&control->id, kctl, index_offset);
4022     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4023     index c71dcacea807..96143df19b21 100644
4024     --- a/sound/pci/hda/hda_intel.c
4025     +++ b/sound/pci/hda/hda_intel.c
4026     @@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
4027     };
4028     #define param_check_xint param_check_int
4029    
4030     -static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
4031     +static int power_save = -1;
4032     module_param(power_save, xint, 0644);
4033     MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
4034     "(in second, 0 = disable).");
4035     @@ -2186,6 +2186,24 @@ static int azx_probe(struct pci_dev *pci,
4036     return err;
4037     }
4038    
4039     +#ifdef CONFIG_PM
4040     +/* On some boards setting power_save to a non-zero value leads to clicking /
4041     + * popping sounds whenever we enter/leave powersaving mode. Ideally we would
4042     + * figure out how to avoid these sounds, but that is not always feasible.
4043     + * So we keep a list of devices where we disable powersaving as it's known
4044     + * to cause problems on these devices.
4045     + */
4046     +static struct snd_pci_quirk power_save_blacklist[] = {
4047     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
4048     + SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
4049     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
4050     + SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
4051     + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
4052     + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
4053     + {}
4054     +};
4055     +#endif /* CONFIG_PM */
4056     +
4057     /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
4058     static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
4059     [AZX_DRIVER_NVIDIA] = 8,
4060     @@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
4061     struct hdac_bus *bus = azx_bus(chip);
4062     struct pci_dev *pci = chip->pci;
4063     int dev = chip->dev_index;
4064     + int val;
4065     int err;
4066    
4067     hda->probe_continued = 1;
4068     @@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
4069    
4070     chip->running = 1;
4071     azx_add_card_list(chip);
4072     - snd_hda_set_power_save(&chip->bus, power_save * 1000);
4073     +
4074     + val = power_save;
4075     +#ifdef CONFIG_PM
4076     + if (val == -1) {
4077     + const struct snd_pci_quirk *q;
4078     +
4079     + val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
4080     + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
4081     + if (q && val) {
4082     + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
4083     + q->subvendor, q->subdevice);
4084     + val = 0;
4085     + }
4086     + }
4087     +#endif /* CONFIG_PM */
4088     + snd_hda_set_power_save(&chip->bus, val * 1000);
4089     if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
4090     pm_runtime_put_autosuspend(&pci->dev);
4091    
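
The probe path above resolves power_save by matching the PCI subsystem IDs against the new quirk table and forcing the timeout to 0 on a hit. A userspace model of that lookup, reusing the table entries from the patch (the lookup helper itself is a stand-in for snd_pci_quirk_lookup()):

#include <stdio.h>

/* Match (subvendor, subdevice) against the blacklist; on a hit the
 * power-save timeout is forced to 0, otherwise the default is kept.
 */
struct quirk { unsigned short subvendor, subdevice; const char *name; };

static const struct quirk power_save_blacklist[] = {
    { 0x1849, 0x0c0c, "Asrock B85M-ITX" },
    { 0x1043, 0x8733, "Asus Prime X370-Pro" },
    { 0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen" },
    { 0, 0, NULL }
};

static int pick_power_save(unsigned short sv, unsigned short sd, int dflt)
{
    for (const struct quirk *q = power_save_blacklist; q->subvendor; q++)
        if (q->subvendor == sv && q->subdevice == sd) {
            printf("%s: forcing power_save to 0\n", q->name);
            return 0;
        }
    return dflt;
}

int main(void)
{
    printf("%d\n", pick_power_save(0x1849, 0x0c0c, 10)); /* blacklisted */
    printf("%d\n", pick_power_save(0x8086, 0x0001, 10)); /* default */
    return 0;
}
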
4092     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4093     index b7acffdf16a4..454476b47b79 100644
4094     --- a/sound/pci/hda/patch_realtek.c
4095     +++ b/sound/pci/hda/patch_realtek.c
4096     @@ -4852,13 +4852,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
4097    
4098     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4099     spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
4100     + snd_hda_apply_pincfgs(codec, pincfgs);
4101     + } else if (action == HDA_FIXUP_ACT_INIT) {
4102     /* Enable DOCK device */
4103     snd_hda_codec_write(codec, 0x17, 0,
4104     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
4105     /* Enable DOCK device */
4106     snd_hda_codec_write(codec, 0x19, 0,
4107     AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
4108     - snd_hda_apply_pincfgs(codec, pincfgs);
4109     }
4110     }
4111    
4112     diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
4113     index 8a59d4782a0f..69bf5cf1e91e 100644
4114     --- a/sound/usb/quirks-table.h
4115     +++ b/sound/usb/quirks-table.h
4116     @@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
4117     }
4118     },
4119    
4120     +{
4121     + /*
4122     + * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
4123     + * even though they advertise more. The capture interface doesn't work
4124     + * even on Windows.
4125     + */
4126     + USB_DEVICE(0x19b5, 0x0021),
4127     + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
4128     + .ifnum = QUIRK_ANY_INTERFACE,
4129     + .type = QUIRK_COMPOSITE,
4130     + .data = (const struct snd_usb_audio_quirk[]) {
4131     + {
4132     + .ifnum = 0,
4133     + .type = QUIRK_AUDIO_STANDARD_MIXER,
4134     + },
4135     + /* Capture */
4136     + {
4137     + .ifnum = 1,
4138     + .type = QUIRK_IGNORE_INTERFACE,
4139     + },
4140     + /* Playback */
4141     + {
4142     + .ifnum = 2,
4143     + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
4144     + .data = &(const struct audioformat) {
4145     + .formats = SNDRV_PCM_FMTBIT_S16_LE,
4146     + .channels = 2,
4147     + .iface = 2,
4148     + .altsetting = 1,
4149     + .altset_idx = 1,
4150     + .attributes = UAC_EP_CS_ATTR_FILL_MAX |
4151     + UAC_EP_CS_ATTR_SAMPLE_RATE,
4152     + .endpoint = 0x03,
4153     + .ep_attr = USB_ENDPOINT_XFER_ISOC,
4154     + .rates = SNDRV_PCM_RATE_48000,
4155     + .rate_min = 48000,
4156     + .rate_max = 48000,
4157     + .nr_rates = 1,
4158     + .rate_table = (unsigned int[]) {
4159     + 48000
4160     + }
4161     + }
4162     + },
4163     + }
4164     + }
4165     +},
4166     +
4167     #undef USB_DEVICE_VENDOR_SPEC
4168     diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
4169     index a0951505c7f5..697872d8308e 100644
4170     --- a/sound/x86/intel_hdmi_audio.c
4171     +++ b/sound/x86/intel_hdmi_audio.c
4172     @@ -1827,6 +1827,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
4173     ctx->port = port;
4174     ctx->pipe = -1;
4175    
4176     + spin_lock_init(&ctx->had_spinlock);
4177     + mutex_init(&ctx->mutex);
4178     INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
4179    
4180     ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
4181     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4182     index 8401774f5aeb..d81af263f50b 100644
4183     --- a/virt/kvm/kvm_main.c
4184     +++ b/virt/kvm/kvm_main.c
4185     @@ -975,8 +975,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
4186     /* Check for overlaps */
4187     r = -EEXIST;
4188     kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
4189     - if ((slot->id >= KVM_USER_MEM_SLOTS) ||
4190     - (slot->id == id))
4191     + if (slot->id == id)
4192     continue;
4193     if (!((base_gfn + npages <= slot->base_gfn) ||
4194     (base_gfn >= slot->base_gfn + slot->npages)))
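
The overlap predicate kept by this final hunk is the standard interval test: two gfn ranges [base, base + npages) are disjoint iff one ends at or before the other begins; the patch only stops skipping private memslots (id >= KVM_USER_MEM_SLOTS), so overlaps with them are now caught too. A small demonstration of the predicate:

#include <stdbool.h>
#include <stdio.h>

/* Half-open interval overlap: not (A ends before B starts or A starts
 * after B ends).
 */
static bool slots_overlap(unsigned long a_base, unsigned long a_npages,
                          unsigned long b_base, unsigned long b_npages)
{
    return !(a_base + a_npages <= b_base || a_base >= b_base + b_npages);
}

int main(void)
{
    printf("%d\n", slots_overlap(0, 16, 16, 16)); /* adjacent: 0 */
    printf("%d\n", slots_overlap(0, 17, 16, 16)); /* one page shared: 1 */
    return 0;
}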