Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0141-4.9.42-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 110270 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 82eb3d1ee801..34d4d9f8a4b2 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 41
9     +SUBLEVEL = 42
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
14     index 7037201c5e3a..f3baa896ce84 100644
15     --- a/arch/arm/boot/dts/Makefile
16     +++ b/arch/arm/boot/dts/Makefile
17     @@ -820,6 +820,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
18     sun8i-a83t-allwinner-h8homlet-v2.dtb \
19     sun8i-a83t-cubietruck-plus.dtb \
20     sun8i-h3-bananapi-m2-plus.dtb \
21     + sun8i-h3-nanopi-m1.dtb \
22     sun8i-h3-nanopi-neo.dtb \
23     sun8i-h3-orangepi-2.dtb \
24     sun8i-h3-orangepi-lite.dtb \
25     diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
26     index 895fa6cfa15a..563901e0ec07 100644
27     --- a/arch/arm/boot/dts/armada-388-gp.dts
28     +++ b/arch/arm/boot/dts/armada-388-gp.dts
29     @@ -75,7 +75,7 @@
30     pinctrl-names = "default";
31     pinctrl-0 = <&pca0_pins>;
32     interrupt-parent = <&gpio0>;
33     - interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
34     + interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
35     gpio-controller;
36     #gpio-cells = <2>;
37     interrupt-controller;
38     @@ -87,7 +87,7 @@
39     compatible = "nxp,pca9555";
40     pinctrl-names = "default";
41     interrupt-parent = <&gpio0>;
42     - interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
43     + interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
44     gpio-controller;
45     #gpio-cells = <2>;
46     interrupt-controller;
47     diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
48     index 5ea4915f6d75..10d307408f23 100644
49     --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
50     +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
51     @@ -56,7 +56,7 @@
52     };
53    
54     &pio {
55     - mmc2_pins_nrst: mmc2@0 {
56     + mmc2_pins_nrst: mmc2-rst-pin {
57     allwinner,pins = "PC16";
58     allwinner,function = "gpio_out";
59     allwinner,drive = <SUN4I_PINCTRL_10_MA>;
60     diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
61     index 4cab64cb581e..e3a51e3538b7 100644
62     --- a/arch/arm/boot/dts/tango4-vantage-1172.dts
63     +++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
64     @@ -21,7 +21,7 @@
65     };
66    
67     &eth0 {
68     - phy-connection-type = "rgmii";
69     + phy-connection-type = "rgmii-id";
70     phy-handle = <&eth0_phy>;
71     #address-cells = <1>;
72     #size-cells = <0>;
73     diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
74     index bfe2a2f5a644..22b73112b75f 100644
75     --- a/arch/arm/include/asm/ftrace.h
76     +++ b/arch/arm/include/asm/ftrace.h
77     @@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
78    
79     #define ftrace_return_address(n) return_address(n)
80    
81     +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
82     +
83     +static inline bool arch_syscall_match_sym_name(const char *sym,
84     + const char *name)
85     +{
86     + if (!strcmp(sym, "sys_mmap2"))
87     + sym = "sys_mmap_pgoff";
88     + else if (!strcmp(sym, "sys_statfs64_wrapper"))
89     + sym = "sys_statfs64";
90     + else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
91     + sym = "sys_fstatfs64";
92     + else if (!strcmp(sym, "sys_arm_fadvise64_64"))
93     + sym = "sys_fadvise64_64";
94     +
95     + /* Ignore case since sym may start with "SyS" instead of "sys" */
96     + return !strcasecmp(sym, name);
97     +}
98     +
99     #endif /* ifndef __ASSEMBLY__ */
100    
101     #endif /* _ASM_ARM_FTRACE */
102     diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
103     index c721ea2fdbd8..df757c9675e6 100644
104     --- a/arch/parisc/kernel/cache.c
105     +++ b/arch/parisc/kernel/cache.c
106     @@ -604,13 +604,12 @@ void flush_cache_range(struct vm_area_struct *vma,
107     if (parisc_requires_coherency())
108     flush_tlb_range(vma, start, end);
109    
110     - if ((end - start) >= parisc_cache_flush_threshold) {
111     + if ((end - start) >= parisc_cache_flush_threshold
112     + || vma->vm_mm->context != mfsp(3)) {
113     flush_cache_all();
114     return;
115     }
116    
117     - BUG_ON(vma->vm_mm->context != mfsp(3));
118     -
119     flush_user_dcache_range_asm(start, end);
120     if (vma->vm_flags & VM_EXEC)
121     flush_user_icache_range_asm(start, end);
122     diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
123     index 3c05c311e35e..028a22bfa90c 100644
124     --- a/arch/powerpc/kernel/irq.c
125     +++ b/arch/powerpc/kernel/irq.c
126     @@ -146,6 +146,19 @@ notrace unsigned int __check_irq_replay(void)
127    
128     /* Clear bit 0 which we wouldn't clear otherwise */
129     local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
130     + if (happened & PACA_IRQ_HARD_DIS) {
131     + /*
132     + * We may have missed a decrementer interrupt if hard disabled.
133     + * Check the decrementer register in case we had a rollover
134     + * while hard disabled.
135     + */
136     + if (!(happened & PACA_IRQ_DEC)) {
137     + if (decrementer_check_overflow()) {
138     + local_paca->irq_happened |= PACA_IRQ_DEC;
139     + happened |= PACA_IRQ_DEC;
140     + }
141     + }
142     + }
143    
144     /*
145     * Force the delivery of pending soft-disabled interrupts on PS3.
146     @@ -171,7 +184,7 @@ notrace unsigned int __check_irq_replay(void)
147     * in case we also had a rollover while hard disabled
148     */
149     local_paca->irq_happened &= ~PACA_IRQ_DEC;
150     - if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
151     + if (happened & PACA_IRQ_DEC)
152     return 0x900;
153    
154     /* Finally check if an external interrupt happened */
155     diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
156     index 5c8f12fe9721..dcbb9144c16d 100644
157     --- a/arch/powerpc/kernel/ptrace.c
158     +++ b/arch/powerpc/kernel/ptrace.c
159     @@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
160     * If task is not current, it will have been flushed already to
161     * it's thread_struct during __switch_to().
162     *
163     - * A reclaim flushes ALL the state.
164     + * A reclaim flushes ALL the state or if not in TM save TM SPRs
165     + * in the appropriate thread structures from live.
166     */
167    
168     - if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
169     - tm_reclaim_current(TM_CAUSE_SIGNAL);
170     + if (tsk != current)
171     + return;
172    
173     + if (MSR_TM_SUSPENDED(mfmsr())) {
174     + tm_reclaim_current(TM_CAUSE_SIGNAL);
175     + } else {
176     + tm_enable();
177     + tm_save_sprs(&(tsk->thread));
178     + }
179     }
180     #else
181     static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
182     diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
183     index ec9c04de3664..ff05992dae7a 100644
184     --- a/arch/sparc/include/asm/trap_block.h
185     +++ b/arch/sparc/include/asm/trap_block.h
186     @@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
187     void init_cur_cpu_trap(struct thread_info *);
188     void setup_tba(void);
189     extern int ncpus_probed;
190     +extern u64 cpu_mondo_counter[NR_CPUS];
191    
192     unsigned long real_hard_smp_processor_id(void);
193    
194     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
195     index d5807d24b98f..2deb89ef1d5f 100644
196     --- a/arch/sparc/kernel/smp_64.c
197     +++ b/arch/sparc/kernel/smp_64.c
198     @@ -621,22 +621,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
199     }
200     }
201    
202     -/* Multi-cpu list version. */
203     +#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
204     +#define MONDO_USEC_WAIT_MIN 2
205     +#define MONDO_USEC_WAIT_MAX 100
206     +#define MONDO_RETRY_LIMIT 500000
207     +
208     +/* Multi-cpu list version.
209     + *
210     + * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
211     + * Sometimes not all cpus receive the mondo, requiring us to re-send
212     + * the mondo until all cpus have received, or cpus are truly stuck
213     + * unable to receive mondo, and we timeout.
214     + * Occasionally a target cpu strand is borrowed briefly by hypervisor to
215     + * perform guest service, such as PCIe error handling. Consider the
216     + * service time, 1 second overall wait is reasonable for 1 cpu.
217     + * Here two in-between mondo check wait time are defined: 2 usec for
218     + * single cpu quick turn around and up to 100usec for large cpu count.
219     + * Deliver mondo to large number of cpus could take longer, we adjusts
220     + * the retry count as long as target cpus are making forward progress.
221     + */
222     static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
223     {
224     - int retries, this_cpu, prev_sent, i, saw_cpu_error;
225     + int this_cpu, tot_cpus, prev_sent, i, rem;
226     + int usec_wait, retries, tot_retries;
227     + u16 first_cpu = 0xffff;
228     + unsigned long xc_rcvd = 0;
229     unsigned long status;
230     + int ecpuerror_id = 0;
231     + int enocpu_id = 0;
232     u16 *cpu_list;
233     + u16 cpu;
234    
235     this_cpu = smp_processor_id();
236     -
237     cpu_list = __va(tb->cpu_list_pa);
238     -
239     - saw_cpu_error = 0;
240     - retries = 0;
241     + usec_wait = cnt * MONDO_USEC_WAIT_MIN;
242     + if (usec_wait > MONDO_USEC_WAIT_MAX)
243     + usec_wait = MONDO_USEC_WAIT_MAX;
244     + retries = tot_retries = 0;
245     + tot_cpus = cnt;
246     prev_sent = 0;
247     +
248     do {
249     - int forward_progress, n_sent;
250     + int n_sent, mondo_delivered, target_cpu_busy;
251    
252     status = sun4v_cpu_mondo_send(cnt,
253     tb->cpu_list_pa,
254     @@ -644,94 +670,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
255    
256     /* HV_EOK means all cpus received the xcall, we're done. */
257     if (likely(status == HV_EOK))
258     - break;
259     + goto xcall_done;
260     +
261     + /* If not these non-fatal errors, panic */
262     + if (unlikely((status != HV_EWOULDBLOCK) &&
263     + (status != HV_ECPUERROR) &&
264     + (status != HV_ENOCPU)))
265     + goto fatal_errors;
266    
267     /* First, see if we made any forward progress.
268     + *
269     + * Go through the cpu_list, count the target cpus that have
270     + * received our mondo (n_sent), and those that did not (rem).
271     + * Re-pack cpu_list with the cpus remain to be retried in the
272     + * front - this simplifies tracking the truly stalled cpus.
273     *
274     * The hypervisor indicates successful sends by setting
275     * cpu list entries to the value 0xffff.
276     + *
277     + * EWOULDBLOCK means some target cpus did not receive the
278     + * mondo and retry usually helps.
279     + *
280     + * ECPUERROR means at least one target cpu is in error state,
281     + * it's usually safe to skip the faulty cpu and retry.
282     + *
283     + * ENOCPU means one of the target cpu doesn't belong to the
284     + * domain, perhaps offlined which is unexpected, but not
285     + * fatal and it's okay to skip the offlined cpu.
286     */
287     + rem = 0;
288     n_sent = 0;
289     for (i = 0; i < cnt; i++) {
290     - if (likely(cpu_list[i] == 0xffff))
291     + cpu = cpu_list[i];
292     + if (likely(cpu == 0xffff)) {
293     n_sent++;
294     + } else if ((status == HV_ECPUERROR) &&
295     + (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
296     + ecpuerror_id = cpu + 1;
297     + } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
298     + enocpu_id = cpu + 1;
299     + } else {
300     + cpu_list[rem++] = cpu;
301     + }
302     }
303    
304     - forward_progress = 0;
305     - if (n_sent > prev_sent)
306     - forward_progress = 1;
307     + /* No cpu remained, we're done. */
308     + if (rem == 0)
309     + break;
310    
311     - prev_sent = n_sent;
312     + /* Otherwise, update the cpu count for retry. */
313     + cnt = rem;
314    
315     - /* If we get a HV_ECPUERROR, then one or more of the cpus
316     - * in the list are in error state. Use the cpu_state()
317     - * hypervisor call to find out which cpus are in error state.
318     + /* Record the overall number of mondos received by the
319     + * first of the remaining cpus.
320     */
321     - if (unlikely(status == HV_ECPUERROR)) {
322     - for (i = 0; i < cnt; i++) {
323     - long err;
324     - u16 cpu;
325     + if (first_cpu != cpu_list[0]) {
326     + first_cpu = cpu_list[0];
327     + xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
328     + }
329    
330     - cpu = cpu_list[i];
331     - if (cpu == 0xffff)
332     - continue;
333     + /* Was any mondo delivered successfully? */
334     + mondo_delivered = (n_sent > prev_sent);
335     + prev_sent = n_sent;
336    
337     - err = sun4v_cpu_state(cpu);
338     - if (err == HV_CPU_STATE_ERROR) {
339     - saw_cpu_error = (cpu + 1);
340     - cpu_list[i] = 0xffff;
341     - }
342     - }
343     - } else if (unlikely(status != HV_EWOULDBLOCK))
344     - goto fatal_mondo_error;
345     + /* or, was any target cpu busy processing other mondos? */
346     + target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
347     + xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
348    
349     - /* Don't bother rewriting the CPU list, just leave the
350     - * 0xffff and non-0xffff entries in there and the
351     - * hypervisor will do the right thing.
352     - *
353     - * Only advance timeout state if we didn't make any
354     - * forward progress.
355     + /* Retry count is for no progress. If we're making progress,
356     + * reset the retry count.
357     */
358     - if (unlikely(!forward_progress)) {
359     - if (unlikely(++retries > 10000))
360     - goto fatal_mondo_timeout;
361     -
362     - /* Delay a little bit to let other cpus catch up
363     - * on their cpu mondo queue work.
364     - */
365     - udelay(2 * cnt);
366     + if (likely(mondo_delivered || target_cpu_busy)) {
367     + tot_retries += retries;
368     + retries = 0;
369     + } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
370     + goto fatal_mondo_timeout;
371     }
372     - } while (1);
373    
374     - if (unlikely(saw_cpu_error))
375     - goto fatal_mondo_cpu_error;
376     + /* Delay a little bit to let other cpus catch up on
377     + * their cpu mondo queue work.
378     + */
379     + if (!mondo_delivered)
380     + udelay(usec_wait);
381    
382     - return;
383     + retries++;
384     + } while (1);
385    
386     -fatal_mondo_cpu_error:
387     - printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
388     - "(including %d) were in error state\n",
389     - this_cpu, saw_cpu_error - 1);
390     +xcall_done:
391     + if (unlikely(ecpuerror_id > 0)) {
392     + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
393     + this_cpu, ecpuerror_id - 1);
394     + } else if (unlikely(enocpu_id > 0)) {
395     + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
396     + this_cpu, enocpu_id - 1);
397     + }
398     return;
399    
400     +fatal_errors:
401     + /* fatal errors include bad alignment, etc */
402     + pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
403     + this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
404     + panic("Unexpected SUN4V mondo error %lu\n", status);
405     +
406     fatal_mondo_timeout:
407     - printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
408     - " progress after %d retries.\n",
409     - this_cpu, retries);
410     - goto dump_cpu_list_and_out;
411     -
412     -fatal_mondo_error:
413     - printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
414     - this_cpu, status);
415     - printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
416     - "mondo_block_pa(%lx)\n",
417     - this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
418     -
419     -dump_cpu_list_and_out:
420     - printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
421     - for (i = 0; i < cnt; i++)
422     - printk("%u ", cpu_list[i]);
423     - printk("]\n");
424     + /* some cpus being non-responsive to the cpu mondo */
425     + pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
426     + this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
427     + panic("SUN4V mondo timeout panic\n");
428     }
429    
430     static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
431     diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
432     index 559bc5e9c199..34631995859a 100644
433     --- a/arch/sparc/kernel/sun4v_ivec.S
434     +++ b/arch/sparc/kernel/sun4v_ivec.S
435     @@ -26,6 +26,21 @@ sun4v_cpu_mondo:
436     ldxa [%g0] ASI_SCRATCHPAD, %g4
437     sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
438    
439     + /* Get smp_processor_id() into %g3 */
440     + sethi %hi(trap_block), %g5
441     + or %g5, %lo(trap_block), %g5
442     + sub %g4, %g5, %g3
443     + srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
444     +
445     + /* Increment cpu_mondo_counter[smp_processor_id()] */
446     + sethi %hi(cpu_mondo_counter), %g5
447     + or %g5, %lo(cpu_mondo_counter), %g5
448     + sllx %g3, 3, %g3
449     + add %g5, %g3, %g5
450     + ldx [%g5], %g3
451     + add %g3, 1, %g3
452     + stx %g3, [%g5]
453     +
454     /* Get CPU mondo queue base phys address into %g7. */
455     ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
456    
457     diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
458     index d44fb806bbd7..32dafb920908 100644
459     --- a/arch/sparc/kernel/traps_64.c
460     +++ b/arch/sparc/kernel/traps_64.c
461     @@ -2732,6 +2732,7 @@ void do_getpsr(struct pt_regs *regs)
462     }
463     }
464    
465     +u64 cpu_mondo_counter[NR_CPUS] = {0};
466     struct trap_per_cpu trap_block[NR_CPUS];
467     EXPORT_SYMBOL(trap_block);
468    
469     diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
470     index 54f98706b03b..5a8cb37f0a3b 100644
471     --- a/arch/sparc/lib/U3memcpy.S
472     +++ b/arch/sparc/lib/U3memcpy.S
473     @@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
474     ENTRY(U3_retl_o2_and_7_plus_GS)
475     and %o2, 7, %o2
476     retl
477     - add %o2, GLOBAL_SPARE, %o2
478     + add %o2, GLOBAL_SPARE, %o0
479     ENDPROC(U3_retl_o2_and_7_plus_GS)
480     ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
481     add GLOBAL_SPARE, 8, GLOBAL_SPARE
482     and %o2, 7, %o2
483     retl
484     - add %o2, GLOBAL_SPARE, %o2
485     + add %o2, GLOBAL_SPARE, %o0
486     ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
487     #endif
488    
489     diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
490     index cc3bd583dce1..9e240fcba784 100644
491     --- a/arch/x86/boot/string.c
492     +++ b/arch/x86/boot/string.c
493     @@ -14,6 +14,7 @@
494    
495     #include <linux/types.h>
496     #include "ctype.h"
497     +#include "string.h"
498    
499     int memcmp(const void *s1, const void *s2, size_t len)
500     {
501     diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
502     index 725e820602b1..113588ddb43f 100644
503     --- a/arch/x86/boot/string.h
504     +++ b/arch/x86/boot/string.h
505     @@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
506     #define memset(d,c,l) __builtin_memset(d,c,l)
507     #define memcmp __builtin_memcmp
508    
509     +extern int strcmp(const char *str1, const char *str2);
510     +extern int strncmp(const char *cs, const char *ct, size_t count);
511     +extern size_t strlen(const char *s);
512     +extern char *strstr(const char *s1, const char *s2);
513     +extern size_t strnlen(const char *s, size_t maxlen);
514     +extern unsigned int atou(const char *s);
515     +extern unsigned long long simple_strtoull(const char *cp, char **endp,
516     + unsigned int base);
517     +
518     #endif /* BOOT_STRING_H */
519     diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
520     index 9cf697ceedbf..55ffd9dc2258 100644
521     --- a/arch/x86/kernel/kvm.c
522     +++ b/arch/x86/kernel/kvm.c
523     @@ -152,6 +152,8 @@ void kvm_async_pf_task_wait(u32 token)
524     if (hlist_unhashed(&n.link))
525     break;
526    
527     + rcu_irq_exit();
528     +
529     if (!n.halted) {
530     local_irq_enable();
531     schedule();
532     @@ -160,11 +162,11 @@ void kvm_async_pf_task_wait(u32 token)
533     /*
534     * We cannot reschedule. So halt.
535     */
536     - rcu_irq_exit();
537     native_safe_halt();
538     local_irq_disable();
539     - rcu_irq_enter();
540     }
541     +
542     + rcu_irq_enter();
543     }
544     if (!n.halted)
545     finish_swait(&n.wq, &wait);
546     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
547     index 8e575fbdf31d..e3e10e8f6f6a 100644
548     --- a/drivers/ata/libata-scsi.c
549     +++ b/drivers/ata/libata-scsi.c
550     @@ -2971,10 +2971,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
551     static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
552     {
553     if (!sata_pmp_attached(ap)) {
554     - if (likely(devno < ata_link_max_devices(&ap->link)))
555     + if (likely(devno >= 0 &&
556     + devno < ata_link_max_devices(&ap->link)))
557     return &ap->link.device[devno];
558     } else {
559     - if (likely(devno < ap->nr_pmp_links))
560     + if (likely(devno >= 0 &&
561     + devno < ap->nr_pmp_links))
562     return &ap->pmp_link[devno].device[0];
563     }
564    
565     diff --git a/drivers/base/property.c b/drivers/base/property.c
566     index 43a36d68c3fd..06f66687fe0b 100644
567     --- a/drivers/base/property.c
568     +++ b/drivers/base/property.c
569     @@ -182,11 +182,12 @@ static int pset_prop_read_string(struct property_set *pset,
570     return 0;
571     }
572    
573     -static inline struct fwnode_handle *dev_fwnode(struct device *dev)
574     +struct fwnode_handle *dev_fwnode(struct device *dev)
575     {
576     return IS_ENABLED(CONFIG_OF) && dev->of_node ?
577     &dev->of_node->fwnode : dev->fwnode;
578     }
579     +EXPORT_SYMBOL_GPL(dev_fwnode);
580    
581     /**
582     * device_property_present - check if a property of a device is present
583     diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
584     index c9441f9d4585..98b767d3171e 100644
585     --- a/drivers/block/nbd.c
586     +++ b/drivers/block/nbd.c
587     @@ -929,6 +929,7 @@ static int __init nbd_init(void)
588     return -ENOMEM;
589    
590     for (i = 0; i < nbds_max; i++) {
591     + struct request_queue *q;
592     struct gendisk *disk = alloc_disk(1 << part_shift);
593     if (!disk)
594     goto out;
595     @@ -954,12 +955,13 @@ static int __init nbd_init(void)
596     * every gendisk to have its very own request_queue struct.
597     * These structs are big so we dynamically allocate them.
598     */
599     - disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
600     - if (!disk->queue) {
601     + q = blk_mq_init_queue(&nbd_dev[i].tag_set);
602     + if (IS_ERR(q)) {
603     blk_mq_free_tag_set(&nbd_dev[i].tag_set);
604     put_disk(disk);
605     goto out;
606     }
607     + disk->queue = q;
608    
609     /*
610     * Tell the block layer that we are not a rotational device
611     diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
612     index 3c3b8f601469..10332c24f961 100644
613     --- a/drivers/block/virtio_blk.c
614     +++ b/drivers/block/virtio_blk.c
615     @@ -630,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
616     if (err)
617     goto out_put_disk;
618    
619     - q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
620     + q = blk_mq_init_queue(&vblk->tag_set);
621     if (IS_ERR(q)) {
622     err = -ENOMEM;
623     goto out_free_tags;
624     }
625     + vblk->disk->queue = q;
626    
627     q->queuedata = vblk;
628    
629     diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
630     index 8c8b495cbf0d..cdc092a1d9ef 100644
631     --- a/drivers/clk/samsung/clk-exynos5420.c
632     +++ b/drivers/clk/samsung/clk-exynos5420.c
633     @@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
634     GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
635     GATE_BUS_TOP, 24, 0, 0),
636     GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
637     - GATE_BUS_TOP, 27, 0, 0),
638     + GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
639     };
640    
641     static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
642     @@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
643     GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
644    
645     GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
646     - GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
647     + GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
648     GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
649     GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
650    
651     GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
652     GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
653     GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
654     - GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
655     + GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
656     GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
657     GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
658     GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
659     GATE_BUS_TOP, 5, 0, 0),
660     GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
661     - GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
662     + GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
663     GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
664     GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
665     GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
666     @@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
667     GATE(0, "aclk166", "mout_user_aclk166",
668     GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
669     GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
670     - GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
671     + GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
672     GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
673     GATE_BUS_TOP, 16, 0, 0),
674     GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
675     GATE_BUS_TOP, 17, 0, 0),
676     GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
677     - GATE_BUS_TOP, 18, 0, 0),
678     + GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
679     GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
680     GATE_BUS_TOP, 28, 0, 0),
681     GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
682     GATE_BUS_TOP, 29, 0, 0),
683    
684     GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
685     - SRC_MASK_TOP2, 24, 0, 0),
686     + SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
687    
688     GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
689     SRC_MASK_TOP7, 20, 0, 0),
690     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
691     index f2bb5122d2c2..063d176baa24 100644
692     --- a/drivers/gpio/gpiolib.c
693     +++ b/drivers/gpio/gpiolib.c
694     @@ -703,24 +703,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
695     {
696     struct lineevent_state *le = p;
697     struct gpioevent_data ge;
698     - int ret;
699     + int ret, level;
700    
701     ge.timestamp = ktime_get_real_ns();
702     + level = gpiod_get_value_cansleep(le->desc);
703    
704     if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
705     && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
706     - int level = gpiod_get_value_cansleep(le->desc);
707     -
708     if (level)
709     /* Emit low-to-high event */
710     ge.id = GPIOEVENT_EVENT_RISING_EDGE;
711     else
712     /* Emit high-to-low event */
713     ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
714     - } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
715     + } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
716     /* Emit low-to-high event */
717     ge.id = GPIOEVENT_EVENT_RISING_EDGE;
718     - } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
719     + } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
720     /* Emit high-to-low event */
721     ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
722     } else {
723     diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
724     index dc9511c5ecb8..327bdf13e8bc 100644
725     --- a/drivers/gpu/drm/amd/amdgpu/si.c
726     +++ b/drivers/gpu/drm/amd/amdgpu/si.c
727     @@ -1301,6 +1301,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
728     amdgpu_program_register_sequence(adev,
729     pitcairn_mgcg_cgcg_init,
730     (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
731     + break;
732     case CHIP_VERDE:
733     amdgpu_program_register_sequence(adev,
734     verde_golden_registers,
735     @@ -1325,6 +1326,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
736     amdgpu_program_register_sequence(adev,
737     oland_mgcg_cgcg_init,
738     (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
739     + break;
740     case CHIP_HAINAN:
741     amdgpu_program_register_sequence(adev,
742     hainan_golden_registers,
743     diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
744     index 2242a80866a9..dc2976c2bed3 100644
745     --- a/drivers/gpu/drm/virtio/virtgpu_fb.c
746     +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
747     @@ -337,7 +337,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
748     info->fbops = &virtio_gpufb_ops;
749     info->pixmap.flags = FB_PIXMAP_SYSTEM;
750    
751     - info->screen_base = obj->vmap;
752     + info->screen_buffer = obj->vmap;
753     info->screen_size = obj->gem_base.size;
754     drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
755     drm_fb_helper_fill_var(info, &vfbdev->helper,
756     diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
757     index f1510cc76d2d..9398143d7c5e 100644
758     --- a/drivers/infiniband/hw/cxgb4/cm.c
759     +++ b/drivers/infiniband/hw/cxgb4/cm.c
760     @@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
761     skb_trim(skb, dlen);
762     mutex_lock(&ep->com.mutex);
763    
764     - /* update RX credits */
765     - update_rx_credits(ep, dlen);
766     -
767     switch (ep->com.state) {
768     case MPA_REQ_SENT:
769     + update_rx_credits(ep, dlen);
770     ep->rcv_seq += dlen;
771     disconnect = process_mpa_reply(ep, skb);
772     break;
773     case MPA_REQ_WAIT:
774     + update_rx_credits(ep, dlen);
775     ep->rcv_seq += dlen;
776     disconnect = process_mpa_request(ep, skb);
777     break;
778     case FPDU_MODE: {
779     struct c4iw_qp_attributes attrs;
780     +
781     + update_rx_credits(ep, dlen);
782     BUG_ON(!ep->com.qp);
783     if (status)
784     pr_err("%s Unexpected streaming data." \
785     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
786     index 41800b6d492e..c380b7e8f1c6 100644
787     --- a/drivers/iommu/amd_iommu.c
788     +++ b/drivers/iommu/amd_iommu.c
789     @@ -4294,6 +4294,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
790     /* Setting */
791     irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
792     irte->hi.fields.vector = vcpu_pi_info->vector;
793     + irte->lo.fields_vapic.ga_log_intr = 1;
794     irte->lo.fields_vapic.guest_mode = 1;
795     irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
796    
797     diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
798     index a18fe5d47238..b4857cd7069e 100644
799     --- a/drivers/media/pci/saa7164/saa7164-bus.c
800     +++ b/drivers/media/pci/saa7164/saa7164-bus.c
801     @@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
802     msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
803     msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
804     msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
805     + memcpy(msg, &msg_tmp, sizeof(*msg));
806    
807     /* No need to update the read positions, because this was a peek */
808     /* If the caller specifically want to peek, return */
809     if (peekonly) {
810     - memcpy(msg, &msg_tmp, sizeof(*msg));
811     goto peekout;
812     }
813    
814     @@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
815     space_rem = bus->m_dwSizeGetRing - curr_grp;
816    
817     if (space_rem < sizeof(*msg)) {
818     - /* msg wraps around the ring */
819     - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
820     - memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
821     - sizeof(*msg) - space_rem);
822     if (buf)
823     memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
824     space_rem, buf_size);
825    
826     } else if (space_rem == sizeof(*msg)) {
827     - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
828     if (buf)
829     memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
830     } else {
831     /* Additional data wraps around the ring */
832     - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
833     if (buf) {
834     memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
835     sizeof(*msg), space_rem - sizeof(*msg));
836     @@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
837    
838     } else {
839     /* No wrapping */
840     - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
841     if (buf)
842     memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
843     buf_size);
844     }
845     - /* Convert from little endian to CPU */
846     - msg->size = le16_to_cpu((__force __le16)msg->size);
847     - msg->command = le32_to_cpu((__force __le32)msg->command);
848     - msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
849    
850     /* Update the read positions, adjusting the ring */
851     saa7164_writel(bus->m_dwGetReadPos, new_grp);
852     diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
853     index 6efb2f1631c4..bdb7a0a00932 100644
854     --- a/drivers/media/platform/davinci/vpfe_capture.c
855     +++ b/drivers/media/platform/davinci/vpfe_capture.c
856     @@ -1725,27 +1725,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
857    
858     switch (cmd) {
859     case VPFE_CMD_S_CCDC_RAW_PARAMS:
860     + ret = -EINVAL;
861     v4l2_warn(&vpfe_dev->v4l2_dev,
862     - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
863     - if (ccdc_dev->hw_ops.set_params) {
864     - ret = ccdc_dev->hw_ops.set_params(param);
865     - if (ret) {
866     - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
867     - "Error setting parameters in CCDC\n");
868     - goto unlock_out;
869     - }
870     - ret = vpfe_get_ccdc_image_format(vpfe_dev,
871     - &vpfe_dev->fmt);
872     - if (ret < 0) {
873     - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
874     - "Invalid image format at CCDC\n");
875     - goto unlock_out;
876     - }
877     - } else {
878     - ret = -EINVAL;
879     - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
880     - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
881     - }
882     + "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
883     break;
884     default:
885     ret = -ENOTTY;
886     diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
887     index c3277308a70b..b49f80cb49c9 100644
888     --- a/drivers/media/rc/ir-lirc-codec.c
889     +++ b/drivers/media/rc/ir-lirc-codec.c
890     @@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
891     return 0;
892    
893     case LIRC_GET_REC_RESOLUTION:
894     - val = dev->rx_resolution;
895     + val = dev->rx_resolution / 1000;
896     break;
897    
898     case LIRC_SET_WIDEBAND_RECEIVER:
899     diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
900     index 98f25ffb4258..848b3453517e 100644
901     --- a/drivers/mmc/core/host.c
902     +++ b/drivers/mmc/core/host.c
903     @@ -179,19 +179,17 @@ static void mmc_retune_timer(unsigned long data)
904     */
905     int mmc_of_parse(struct mmc_host *host)
906     {
907     - struct device_node *np;
908     + struct device *dev = host->parent;
909     u32 bus_width;
910     int ret;
911     bool cd_cap_invert, cd_gpio_invert = false;
912     bool ro_cap_invert, ro_gpio_invert = false;
913    
914     - if (!host->parent || !host->parent->of_node)
915     + if (!dev || !dev_fwnode(dev))
916     return 0;
917    
918     - np = host->parent->of_node;
919     -
920     /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
921     - if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
922     + if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
923     dev_dbg(host->parent,
924     "\"bus-width\" property is missing, assuming 1 bit.\n");
925     bus_width = 1;
926     @@ -213,7 +211,7 @@ int mmc_of_parse(struct mmc_host *host)
927     }
928    
929     /* f_max is obtained from the optional "max-frequency" property */
930     - of_property_read_u32(np, "max-frequency", &host->f_max);
931     + device_property_read_u32(dev, "max-frequency", &host->f_max);
932    
933     /*
934     * Configure CD and WP pins. They are both by default active low to
935     @@ -228,12 +226,12 @@ int mmc_of_parse(struct mmc_host *host)
936     */
937    
938     /* Parse Card Detection */
939     - if (of_property_read_bool(np, "non-removable")) {
940     + if (device_property_read_bool(dev, "non-removable")) {
941     host->caps |= MMC_CAP_NONREMOVABLE;
942     } else {
943     - cd_cap_invert = of_property_read_bool(np, "cd-inverted");
944     + cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
945    
946     - if (of_property_read_bool(np, "broken-cd"))
947     + if (device_property_read_bool(dev, "broken-cd"))
948     host->caps |= MMC_CAP_NEEDS_POLL;
949    
950     ret = mmc_gpiod_request_cd(host, "cd", 0, true,
951     @@ -259,7 +257,7 @@ int mmc_of_parse(struct mmc_host *host)
952     }
953    
954     /* Parse Write Protection */
955     - ro_cap_invert = of_property_read_bool(np, "wp-inverted");
956     + ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
957    
958     ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
959     if (!ret)
960     @@ -267,62 +265,62 @@ int mmc_of_parse(struct mmc_host *host)
961     else if (ret != -ENOENT && ret != -ENOSYS)
962     return ret;
963    
964     - if (of_property_read_bool(np, "disable-wp"))
965     + if (device_property_read_bool(dev, "disable-wp"))
966     host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
967    
968     /* See the comment on CD inversion above */
969     if (ro_cap_invert ^ ro_gpio_invert)
970     host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
971    
972     - if (of_property_read_bool(np, "cap-sd-highspeed"))
973     + if (device_property_read_bool(dev, "cap-sd-highspeed"))
974     host->caps |= MMC_CAP_SD_HIGHSPEED;
975     - if (of_property_read_bool(np, "cap-mmc-highspeed"))
976     + if (device_property_read_bool(dev, "cap-mmc-highspeed"))
977     host->caps |= MMC_CAP_MMC_HIGHSPEED;
978     - if (of_property_read_bool(np, "sd-uhs-sdr12"))
979     + if (device_property_read_bool(dev, "sd-uhs-sdr12"))
980     host->caps |= MMC_CAP_UHS_SDR12;
981     - if (of_property_read_bool(np, "sd-uhs-sdr25"))
982     + if (device_property_read_bool(dev, "sd-uhs-sdr25"))
983     host->caps |= MMC_CAP_UHS_SDR25;
984     - if (of_property_read_bool(np, "sd-uhs-sdr50"))
985     + if (device_property_read_bool(dev, "sd-uhs-sdr50"))
986     host->caps |= MMC_CAP_UHS_SDR50;
987     - if (of_property_read_bool(np, "sd-uhs-sdr104"))
988     + if (device_property_read_bool(dev, "sd-uhs-sdr104"))
989     host->caps |= MMC_CAP_UHS_SDR104;
990     - if (of_property_read_bool(np, "sd-uhs-ddr50"))
991     + if (device_property_read_bool(dev, "sd-uhs-ddr50"))
992     host->caps |= MMC_CAP_UHS_DDR50;
993     - if (of_property_read_bool(np, "cap-power-off-card"))
994     + if (device_property_read_bool(dev, "cap-power-off-card"))
995     host->caps |= MMC_CAP_POWER_OFF_CARD;
996     - if (of_property_read_bool(np, "cap-mmc-hw-reset"))
997     + if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
998     host->caps |= MMC_CAP_HW_RESET;
999     - if (of_property_read_bool(np, "cap-sdio-irq"))
1000     + if (device_property_read_bool(dev, "cap-sdio-irq"))
1001     host->caps |= MMC_CAP_SDIO_IRQ;
1002     - if (of_property_read_bool(np, "full-pwr-cycle"))
1003     + if (device_property_read_bool(dev, "full-pwr-cycle"))
1004     host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
1005     - if (of_property_read_bool(np, "keep-power-in-suspend"))
1006     + if (device_property_read_bool(dev, "keep-power-in-suspend"))
1007     host->pm_caps |= MMC_PM_KEEP_POWER;
1008     - if (of_property_read_bool(np, "wakeup-source") ||
1009     - of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
1010     + if (device_property_read_bool(dev, "wakeup-source") ||
1011     + device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
1012     host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
1013     - if (of_property_read_bool(np, "mmc-ddr-1_8v"))
1014     + if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
1015     host->caps |= MMC_CAP_1_8V_DDR;
1016     - if (of_property_read_bool(np, "mmc-ddr-1_2v"))
1017     + if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
1018     host->caps |= MMC_CAP_1_2V_DDR;
1019     - if (of_property_read_bool(np, "mmc-hs200-1_8v"))
1020     + if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
1021     host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
1022     - if (of_property_read_bool(np, "mmc-hs200-1_2v"))
1023     + if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
1024     host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
1025     - if (of_property_read_bool(np, "mmc-hs400-1_8v"))
1026     + if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
1027     host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
1028     - if (of_property_read_bool(np, "mmc-hs400-1_2v"))
1029     + if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
1030     host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
1031     - if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
1032     + if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
1033     host->caps2 |= MMC_CAP2_HS400_ES;
1034     - if (of_property_read_bool(np, "no-sdio"))
1035     + if (device_property_read_bool(dev, "no-sdio"))
1036     host->caps2 |= MMC_CAP2_NO_SDIO;
1037     - if (of_property_read_bool(np, "no-sd"))
1038     + if (device_property_read_bool(dev, "no-sd"))
1039     host->caps2 |= MMC_CAP2_NO_SD;
1040     - if (of_property_read_bool(np, "no-mmc"))
1041     + if (device_property_read_bool(dev, "no-mmc"))
1042     host->caps2 |= MMC_CAP2_NO_MMC;
1043    
1044     - host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
1045     + host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
1046     if (host->dsr_req && (host->dsr & ~0xffff)) {
1047     dev_err(host->parent,
1048     "device tree specified broken value for DSR: 0x%x, ignoring\n",
1049     diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1050     index f57700c4b8f0..323dba35bc9a 100644
1051     --- a/drivers/mmc/core/mmc.c
1052     +++ b/drivers/mmc/core/mmc.c
1053     @@ -1690,7 +1690,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1054     err = mmc_select_hs400(card);
1055     if (err)
1056     goto free_card;
1057     - } else {
1058     + } else if (!mmc_card_hs400es(card)) {
1059     /* Select the desired bus width optionally */
1060     err = mmc_select_bus_width(card);
1061     if (err > 0 && mmc_card_hs(card)) {
1062     diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
1063     index df478ae72e23..f81f4175f49a 100644
1064     --- a/drivers/mmc/host/dw_mmc.c
1065     +++ b/drivers/mmc/host/dw_mmc.c
1066     @@ -2610,8 +2610,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1067     host->slot[id] = slot;
1068    
1069     mmc->ops = &dw_mci_ops;
1070     - if (of_property_read_u32_array(host->dev->of_node,
1071     - "clock-freq-min-max", freq, 2)) {
1072     + if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
1073     + freq, 2)) {
1074     mmc->f_min = DW_MCI_FREQ_MIN;
1075     mmc->f_max = DW_MCI_FREQ_MAX;
1076     } else {
1077     @@ -2709,7 +2709,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
1078     {
1079     int addr_config;
1080     struct device *dev = host->dev;
1081     - struct device_node *np = dev->of_node;
1082    
1083     /*
1084     * Check tansfer mode from HCON[17:16]
1085     @@ -2770,8 +2769,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
1086     dev_info(host->dev, "Using internal DMA controller.\n");
1087     } else {
1088     /* TRANS_MODE_EDMAC: check dma bindings again */
1089     - if ((of_property_count_strings(np, "dma-names") < 0) ||
1090     - (!of_find_property(np, "dmas", NULL))) {
1091     + if ((device_property_read_string_array(dev, "dma-names",
1092     + NULL, 0) < 0) ||
1093     + !device_property_present(dev, "dmas")) {
1094     goto no_dma;
1095     }
1096     host->dma_ops = &dw_mci_edmac_ops;
1097     @@ -2931,7 +2931,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1098     {
1099     struct dw_mci_board *pdata;
1100     struct device *dev = host->dev;
1101     - struct device_node *np = dev->of_node;
1102     const struct dw_mci_drv_data *drv_data = host->drv_data;
1103     int ret;
1104     u32 clock_frequency;
1105     @@ -2948,15 +2947,16 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
1106     }
1107    
1108     /* find out number of slots supported */
1109     - of_property_read_u32(np, "num-slots", &pdata->num_slots);
1110     + device_property_read_u32(dev, "num-slots", &pdata->num_slots);
1111    
1112     - if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
1113     + if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
1114     dev_info(dev,
1115     "fifo-depth property not found, using value of FIFOTH register as default\n");
1116    
1117     - of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
1118     + device_property_read_u32(dev, "card-detect-delay",
1119     + &pdata->detect_delay_ms);
1120    
1121     - if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
1122     + if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
1123     pdata->bus_hz = clock_frequency;
1124    
1125     if (drv_data && drv_data->parse_dt) {
1126     diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1127     index a8b430ff117b..83b84ffec27d 100644
1128     --- a/drivers/mmc/host/sdhci-of-at91.c
1129     +++ b/drivers/mmc/host/sdhci-of-at91.c
1130     @@ -31,6 +31,7 @@
1131    
1132     #define SDMMC_MC1R 0x204
1133     #define SDMMC_MC1R_DDR BIT(3)
1134     +#define SDMMC_MC1R_FCD BIT(7)
1135     #define SDMMC_CACR 0x230
1136     #define SDMMC_CACR_CAPWREN BIT(0)
1137     #define SDMMC_CACR_KEY (0x46 << 8)
1138     @@ -43,6 +44,15 @@ struct sdhci_at91_priv {
1139     struct clk *mainck;
1140     };
1141    
1142     +static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
1143     +{
1144     + u8 mc1r;
1145     +
1146     + mc1r = readb(host->ioaddr + SDMMC_MC1R);
1147     + mc1r |= SDMMC_MC1R_FCD;
1148     + writeb(mc1r, host->ioaddr + SDMMC_MC1R);
1149     +}
1150     +
1151     static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
1152     {
1153     u16 clk;
1154     @@ -112,10 +122,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
1155     sdhci_set_uhs_signaling(host, timing);
1156     }
1157    
1158     +static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
1159     +{
1160     + sdhci_reset(host, mask);
1161     +
1162     + if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1163     + sdhci_at91_set_force_card_detect(host);
1164     +}
1165     +
1166     static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
1167     .set_clock = sdhci_at91_set_clock,
1168     .set_bus_width = sdhci_set_bus_width,
1169     - .reset = sdhci_reset,
1170     + .reset = sdhci_at91_reset,
1171     .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
1172     .set_power = sdhci_at91_set_power,
1173     };
1174     @@ -322,6 +340,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1175     host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1176     }
1177    
1178     + /*
1179     + * If the device attached to the MMC bus is not removable, it is safer
1180     + * to set the Force Card Detect bit. People often don't connect the
1181     + * card detect signal and use this pin for another purpose. If the card
1182     + * detect pin is not muxed to SDHCI controller, a default value is
1183     + * used. This value can be different from a SoC revision to another
1184     + * one. Problems come when this default value is not card present. To
1185     + * avoid this case, if the device is non removable then the card
1186     + * detection procedure using the SDMCC_CD signal is bypassed.
1187     + * This bit is reset when a software reset for all command is performed
1188     + * so we need to implement our own reset function to set back this bit.
1189     + */
1190     + if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
1191     + sdhci_at91_set_force_card_detect(host);
1192     +
1193     pm_runtime_put_autosuspend(&pdev->dev);
1194    
1195     return 0;
1196     diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1197     index 947adda3397d..3ec573c13dac 100644
1198     --- a/drivers/net/dsa/b53/b53_common.c
1199     +++ b/drivers/net/dsa/b53/b53_common.c
1200     @@ -1558,6 +1558,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
1201     .dev_name = "BCM53125",
1202     .vlans = 4096,
1203     .enabled_ports = 0xff,
1204     + .arl_entries = 4,
1205     .cpu_port = B53_CPU_PORT,
1206     .vta_regs = B53_VTA_REGS,
1207     .duplex_reg = B53_DUPLEX_STAT_GE,
1208     diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
1209     index e078d8da978c..29d29af612d1 100644
1210     --- a/drivers/net/ethernet/aurora/nb8800.c
1211     +++ b/drivers/net/ethernet/aurora/nb8800.c
1212     @@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
1213     mac_mode |= HALF_DUPLEX;
1214    
1215     if (gigabit) {
1216     - if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
1217     + if (phy_interface_is_rgmii(dev->phydev))
1218     mac_mode |= RGMII_MODE;
1219    
1220     mac_mode |= GMAC_MODE;
1221     @@ -1277,11 +1277,10 @@ static int nb8800_tangox_init(struct net_device *dev)
1222     break;
1223    
1224     case PHY_INTERFACE_MODE_RGMII:
1225     - pad_mode = PAD_MODE_RGMII;
1226     - break;
1227     -
1228     + case PHY_INTERFACE_MODE_RGMII_ID:
1229     + case PHY_INTERFACE_MODE_RGMII_RXID:
1230     case PHY_INTERFACE_MODE_RGMII_TXID:
1231     - pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1232     + pad_mode = PAD_MODE_RGMII;
1233     break;
1234    
1235     default:
1236     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1237     index a927a730da10..edae2dcc4927 100644
1238     --- a/drivers/net/ethernet/broadcom/tg3.c
1239     +++ b/drivers/net/ethernet/broadcom/tg3.c
1240     @@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
1241     tg3_mem_rx_release(tp);
1242     tg3_mem_tx_release(tp);
1243    
1244     + /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
1245     + tg3_full_lock(tp, 0);
1246     if (tp->hw_stats) {
1247     dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
1248     tp->hw_stats, tp->stats_mapping);
1249     tp->hw_stats = NULL;
1250     }
1251     + tg3_full_unlock(tp);
1252     }
1253    
1254     /*
1255     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1256     index cb45390c7623..f7fabecc104f 100644
1257     --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1258     +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
1259     @@ -770,6 +770,10 @@ static void cb_timeout_handler(struct work_struct *work)
1260     mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
1261     }
1262    
1263     +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
1264     +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1265     + struct mlx5_cmd_msg *msg);
1266     +
1267     static void cmd_work_handler(struct work_struct *work)
1268     {
1269     struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
1270     @@ -779,16 +783,27 @@ static void cmd_work_handler(struct work_struct *work)
1271     struct mlx5_cmd_layout *lay;
1272     struct semaphore *sem;
1273     unsigned long flags;
1274     + int alloc_ret;
1275    
1276     sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
1277     down(sem);
1278     if (!ent->page_queue) {
1279     - ent->idx = alloc_ent(cmd);
1280     - if (ent->idx < 0) {
1281     + alloc_ret = alloc_ent(cmd);
1282     + if (alloc_ret < 0) {
1283     + if (ent->callback) {
1284     + ent->callback(-EAGAIN, ent->context);
1285     + mlx5_free_cmd_msg(dev, ent->out);
1286     + free_msg(dev, ent->in);
1287     + free_cmd(ent);
1288     + } else {
1289     + ent->ret = -EAGAIN;
1290     + complete(&ent->done);
1291     + }
1292     mlx5_core_err(dev, "failed to allocate command entry\n");
1293     up(sem);
1294     return;
1295     }
1296     + ent->idx = alloc_ret;
1297     } else {
1298     ent->idx = cmd->max_reg_cmds;
1299     spin_lock_irqsave(&cmd->alloc_lock, flags);
1300     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1301     index 13dc388667b6..1612ec0d9103 100644
1302     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1303     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
1304     @@ -62,12 +62,14 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
1305     struct delayed_work *dwork = to_delayed_work(work);
1306     struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
1307     overflow_work);
1308     + struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
1309     unsigned long flags;
1310    
1311     write_lock_irqsave(&tstamp->lock, flags);
1312     timecounter_read(&tstamp->clock);
1313     write_unlock_irqrestore(&tstamp->lock, flags);
1314     - schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
1315     + queue_delayed_work(priv->wq, &tstamp->overflow_work,
1316     + msecs_to_jiffies(tstamp->overflow_period * 1000));
1317     }
1318    
1319     int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
1320     @@ -263,7 +265,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
1321    
1322     INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
1323     if (tstamp->overflow_period)
1324     - schedule_delayed_work(&tstamp->overflow_work, 0);
1325     + queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
1326     else
1327     mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
1328    
1329     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1330     index e034dbc4913d..cf070fc0fb6b 100644
1331     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1332     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
1333     @@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
1334    
1335     static bool outer_header_zero(u32 *match_criteria)
1336     {
1337     - int size = MLX5_ST_SZ_BYTES(fte_match_param);
1338     + int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
1339     char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
1340     outer_headers);
1341    
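
The one-line fix above shrinks the zero test from the size of the whole fte_match_param to the size of its outer_headers field alone; with the full structure size, the check also scanned the misc and inner-header match bits, so criteria that only set inner fields were wrongly treated as having non-zero outer headers. A small sketch of the distinction, with a made-up structure layout:

#include <stdio.h>
#include <string.h>

struct match_param {
	unsigned char outer_headers[16];
	unsigned char misc[16];
	unsigned char inner_headers[16];
};

/* Return 1 iff the outer_headers field is all zero. */
static int outer_header_zero(const struct match_param *p)
{
	/* size of the field, not sizeof(*p): checking sizeof(*p) bytes
	 * starting at outer_headers would also read misc/inner_headers */
	size_t size = sizeof(p->outer_headers);
	const unsigned char *b = p->outer_headers;
	size_t i;

	for (i = 0; i < size; i++)
		if (b[i])
			return 0;
	return 1;
}

int main(void)
{
	struct match_param p;

	memset(&p, 0, sizeof(p));
	p.inner_headers[0] = 0xff;  /* inner criteria set, outer still zero */
	printf("outer zero: %d\n", outer_header_zero(&p)); /* prints 1 */
	return 0;
}
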
1342     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1343     index 6ffd5d2a70aa..52a38106448e 100644
1344     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1345     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1346     @@ -651,9 +651,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1347     int vport;
1348     int err;
1349    
1350     + /* disable PF RoCE so missed packets don't go through RoCE steering */
1351     + mlx5_dev_list_lock();
1352     + mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1353     + mlx5_dev_list_unlock();
1354     +
1355     err = esw_create_offloads_fdb_table(esw, nvports);
1356     if (err)
1357     - return err;
1358     + goto create_fdb_err;
1359    
1360     err = esw_create_offloads_table(esw);
1361     if (err)
1362     @@ -673,11 +678,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1363     goto err_reps;
1364     }
1365    
1366     - /* disable PF RoCE so missed packets don't go through RoCE steering */
1367     - mlx5_dev_list_lock();
1368     - mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1369     - mlx5_dev_list_unlock();
1370     -
1371     return 0;
1372    
1373     err_reps:
1374     @@ -694,6 +694,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1375    
1376     create_ft_err:
1377     esw_destroy_offloads_fdb_table(esw);
1378     +
1379     +create_fdb_err:
1380     + /* enable back PF RoCE */
1381     + mlx5_dev_list_lock();
1382     + mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1383     + mlx5_dev_list_unlock();
1384     +
1385     return err;
1386     }
1387    
1388     @@ -701,11 +708,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
1389     {
1390     int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1391    
1392     - /* enable back PF RoCE */
1393     - mlx5_dev_list_lock();
1394     - mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1395     - mlx5_dev_list_unlock();
1396     -
1397     mlx5_eswitch_disable_sriov(esw);
1398     err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1399     if (err) {
1400     @@ -715,6 +717,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
1401     esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
1402     }
1403    
1404     + /* enable back PF RoCE */
1405     + mlx5_dev_list_lock();
1406     + mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1407     + mlx5_dev_list_unlock();
1408     +
1409     return err;
1410     }
1411    
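
The reordering above disables PF RoCE before the offloads FDB table is created, closing the window where missed packets could still hit RoCE steering, and the new create_fdb_err label re-enables RoCE on every failure path so the disable/enable calls stay balanced; esw_offloads_stop() likewise re-enables RoCE only after SR-IOV mode has been restored. A compact sketch of the goto-unwind convention the hunk extends (resource names invented):

#include <stdio.h>

static void roce_disable(void) { printf("RoCE disabled\n"); }
static void roce_enable(void)  { printf("RoCE re-enabled\n"); }

static int create(const char *what, int fail)
{
	printf("creating %s: %s\n", what, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

/*
 * Disable RoCE before building the FDB so no traffic window exists,
 * and keep every error path balanced: whoever disabled RoCE must
 * re-enable it before returning an error.
 */
static int offloads_init(int fail_at)
{
	int err;

	roce_disable();

	err = create("fdb table", fail_at == 1);
	if (err)
		goto create_fdb_err;

	err = create("offloads table", fail_at == 2);
	if (err)
		goto create_ft_err;

	return 0;

create_ft_err:
	printf("destroying fdb table\n");
create_fdb_err:
	roce_enable();
	return err;
}

int main(void)
{
	return offloads_init(2) == -1 ? 0 : 1;
}
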
1412     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1413     index b5d5519542e8..0ca4623bda6b 100644
1414     --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1415     +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
1416     @@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
1417     static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
1418     u8 *port1, u8 *port2)
1419     {
1420     - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1421     - if (tracker->netdev_state[0].tx_enabled) {
1422     - *port1 = 1;
1423     - *port2 = 1;
1424     - } else {
1425     - *port1 = 2;
1426     - *port2 = 2;
1427     - }
1428     - } else {
1429     - *port1 = 1;
1430     - *port2 = 2;
1431     - if (!tracker->netdev_state[0].link_up)
1432     - *port1 = 2;
1433     - else if (!tracker->netdev_state[1].link_up)
1434     - *port2 = 1;
1435     + *port1 = 1;
1436     + *port2 = 2;
1437     + if (!tracker->netdev_state[0].tx_enabled ||
1438     + !tracker->netdev_state[0].link_up) {
1439     + *port1 = 2;
1440     + return;
1441     }
1442     +
1443     + if (!tracker->netdev_state[1].tx_enabled ||
1444     + !tracker->netdev_state[1].link_up)
1445     + *port2 = 1;
1446     }
1447    
1448     static void mlx5_activate_lag(struct mlx5_lag *ldev,
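
The rewrite above collapses the active-backup and hash cases of the TX affinity mapping into one rule: start from ports (1, 2); if port 1 cannot carry traffic (tx disabled or link down), steer both to port 2; otherwise, if port 2 cannot, steer both to port 1. The same logic as a standalone, testable function (the struct is a stand-in for the lag_tracker fields):

#include <assert.h>
#include <stdio.h>

struct netdev_state { int tx_enabled, link_up; };

static void infer_tx_affinity(const struct netdev_state s[2],
			      unsigned char *port1, unsigned char *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!s[0].tx_enabled || !s[0].link_up) {
		*port1 = 2;        /* port 1 unusable: steer everything to 2 */
		return;
	}
	if (!s[1].tx_enabled || !s[1].link_up)
		*port2 = 1;        /* port 2 unusable: steer everything to 1 */
}

int main(void)
{
	unsigned char p1, p2;
	struct netdev_state both_up[2]    = { {1, 1}, {1, 1} };
	struct netdev_state first_down[2] = { {1, 0}, {1, 1} };

	infer_tx_affinity(both_up, &p1, &p2);
	assert(p1 == 1 && p2 == 2);

	infer_tx_affinity(first_down, &p1, &p2);
	assert(p1 == 2 && p2 == 2);

	puts("affinity mapping ok");
	return 0;
}
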
1449     diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1450     index 12be259394c6..2140dedab712 100644
1451     --- a/drivers/net/ethernet/renesas/sh_eth.c
1452     +++ b/drivers/net/ethernet/renesas/sh_eth.c
1453     @@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
1454     .rpadir_value = 2 << 16,
1455     .no_trimd = 1,
1456     .no_ade = 1,
1457     + .hw_crc = 1,
1458     .tsu = 1,
1459     .select_mii = 1,
1460     .shift_rd0 = 1,
1461     @@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
1462    
1463     .ecsr_value = ECSR_ICD | ECSR_MPD,
1464     .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1465     - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
1466     + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
1467    
1468     .tx_check = EESR_TC1 | EESR_FTC,
1469     .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1470     @@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
1471    
1472     .ecsr_value = ECSR_ICD | ECSR_MPD,
1473     .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
1474     - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
1475     + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
1476    
1477     .tx_check = EESR_TC1 | EESR_FTC,
1478     .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1479     diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
1480     index bca6a1e72d1d..e1bb802d4a4d 100644
1481     --- a/drivers/net/irda/mcs7780.c
1482     +++ b/drivers/net/irda/mcs7780.c
1483     @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
1484     static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
1485     {
1486     struct usb_device *dev = mcs->usbdev;
1487     - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
1488     - MCS_RD_RTYPE, 0, reg, val, 2,
1489     - msecs_to_jiffies(MCS_CTRL_TIMEOUT));
1490     + void *dmabuf;
1491     + int ret;
1492     +
1493     + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
1494     + if (!dmabuf)
1495     + return -ENOMEM;
1496     +
1497     + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
1498     + MCS_RD_RTYPE, 0, reg, dmabuf, 2,
1499     + msecs_to_jiffies(MCS_CTRL_TIMEOUT));
1500     +
1501     + memcpy(val, dmabuf, sizeof(__u16));
1502     + kfree(dmabuf);
1503    
1504     return ret;
1505     }
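
The mcs7780 fix above stops handing usb_control_msg() a pointer that may live on the caller's stack: control transfers are DMA'd, so the driver now bounces through a freshly kmalloc'd buffer and copies the result out afterwards, freeing the buffer on every path. A userspace model of the bounce-buffer pattern (fake_dma_read() stands in for the USB call):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Stand-in for a transfer that requires a heap (DMA-able) buffer. */
static int fake_dma_read(void *buf, size_t len)
{
	memset(buf, 0xAB, len);
	return (int)len;
}

static int get_reg(uint16_t *val)
{
	void *dmabuf;
	int ret;

	/* Never hand the engine a pointer into the caller's stack frame;
	 * allocate a private buffer for the transfer instead. */
	dmabuf = malloc(sizeof(*val));
	if (!dmabuf)
		return -1;                       /* -ENOMEM in the driver */

	ret = fake_dma_read(dmabuf, sizeof(*val));

	memcpy(val, dmabuf, sizeof(*val));       /* copy out, then free */
	free(dmabuf);

	return ret;
}

int main(void)
{
	uint16_t v = 0;

	if (get_reg(&v) < 0)
		return 1;
	printf("reg = 0x%04x\n", v);
	return 0;
}
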
1506     diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
1507     index 4cad95552cf1..01cf094bee18 100644
1508     --- a/drivers/net/phy/dp83867.c
1509     +++ b/drivers/net/phy/dp83867.c
1510     @@ -29,6 +29,7 @@
1511     #define MII_DP83867_MICR 0x12
1512     #define MII_DP83867_ISR 0x13
1513     #define DP83867_CTRL 0x1f
1514     +#define DP83867_CFG3 0x1e
1515    
1516     /* Extended Registers */
1517     #define DP83867_RGMIICTL 0x0032
1518     @@ -90,6 +91,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
1519     micr_status |=
1520     (MII_DP83867_MICR_AN_ERR_INT_EN |
1521     MII_DP83867_MICR_SPEED_CHNG_INT_EN |
1522     + MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
1523     + MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
1524     MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
1525     MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
1526    
1527     @@ -190,6 +193,13 @@ static int dp83867_config_init(struct phy_device *phydev)
1528     DP83867_DEVADDR, delay);
1529     }
1530    
1531     + /* Enable Interrupt output INT_OE in CFG3 register */
1532     + if (phy_interrupt_is_valid(phydev)) {
1533     + val = phy_read(phydev, DP83867_CFG3);
1534     + val |= BIT(7);
1535     + phy_write(phydev, DP83867_CFG3, val);
1536     + }
1537     +
1538     return 0;
1539     }
1540    
1541     diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1542     index edd30ebbf275..775a6e1fdef9 100644
1543     --- a/drivers/net/phy/phy.c
1544     +++ b/drivers/net/phy/phy.c
1545     @@ -674,6 +674,9 @@ void phy_stop_machine(struct phy_device *phydev)
1546     if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
1547     phydev->state = PHY_UP;
1548     mutex_unlock(&phydev->lock);
1549     +
1550     + /* Now we can run the state machine synchronously */
1551     + phy_state_machine(&phydev->state_queue.work);
1552     }
1553    
1554     /**
1555     @@ -1060,6 +1063,15 @@ void phy_state_machine(struct work_struct *work)
1556     if (old_link != phydev->link)
1557     phydev->state = PHY_CHANGELINK;
1558     }
1559     + /*
1560     + * Failsafe: check that nobody set phydev->link=0 between two
1561     + * poll cycles, otherwise we won't leave RUNNING state as long
1562     + * as link remains down.
1563     + */
1564     + if (!phydev->link && phydev->state == PHY_RUNNING) {
1565     + phydev->state = PHY_CHANGELINK;
1566     + phydev_err(phydev, "no link in PHY_RUNNING\n");
1567     + }
1568     break;
1569     case PHY_CHANGELINK:
1570     err = phy_read_status(phydev);
1571     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1572     index 9e7b7836774f..bf02f8e4648a 100644
1573     --- a/drivers/net/phy/phy_device.c
1574     +++ b/drivers/net/phy/phy_device.c
1575     @@ -1714,6 +1714,8 @@ static int phy_remove(struct device *dev)
1576     {
1577     struct phy_device *phydev = to_phy_device(dev);
1578    
1579     + cancel_delayed_work_sync(&phydev->state_queue);
1580     +
1581     mutex_lock(&phydev->lock);
1582     phydev->state = PHY_DOWN;
1583     mutex_unlock(&phydev->lock);
1584     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1585     index 8744b9beda33..8e3c6f4bdaa0 100644
1586     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1587     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1588     @@ -4161,11 +4161,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
1589     goto fail;
1590     }
1591    
1592     - /* allocate scatter-gather table. sg support
1593     - * will be disabled upon allocation failure.
1594     - */
1595     - brcmf_sdiod_sgtable_alloc(bus->sdiodev);
1596     -
1597     /* Query the F2 block size, set roundup accordingly */
1598     bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
1599     bus->roundup = min(max_roundup, bus->blocksize);
1600     diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1601     index 4b97371c3b42..838946d17b59 100644
1602     --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1603     +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
1604     @@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
1605     next_reclaimed;
1606     IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1607     next_reclaimed);
1608     + iwlagn_check_ratid_empty(priv, sta_id, tid);
1609     }
1610    
1611     iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1612    
1613     - iwlagn_check_ratid_empty(priv, sta_id, tid);
1614     freed = 0;
1615    
1616     /* process frames */
1617     diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
1618     index 3ce1f7da8647..cb7365bdf6e0 100644
1619     --- a/drivers/net/xen-netback/common.h
1620     +++ b/drivers/net/xen-netback/common.h
1621     @@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
1622     unsigned long remaining_credit;
1623     struct timer_list credit_timeout;
1624     u64 credit_window_start;
1625     + bool rate_limited;
1626    
1627     /* Statistics */
1628     struct xenvif_stats stats;
1629     diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1630     index b009d7966b46..5bfaf5578810 100644
1631     --- a/drivers/net/xen-netback/interface.c
1632     +++ b/drivers/net/xen-netback/interface.c
1633     @@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
1634    
1635     if (work_done < budget) {
1636     napi_complete(napi);
1637     - xenvif_napi_schedule_or_enable_events(queue);
1638     + /* If the queue is rate-limited, it shall be
1639     + * rescheduled in the timer callback.
1640     + */
1641     + if (likely(!queue->rate_limited))
1642     + xenvif_napi_schedule_or_enable_events(queue);
1643     }
1644    
1645     return work_done;
1646     diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1647     index 47b481095d77..d9b5b73c35a0 100644
1648     --- a/drivers/net/xen-netback/netback.c
1649     +++ b/drivers/net/xen-netback/netback.c
1650     @@ -179,6 +179,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
1651     max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
1652    
1653     queue->remaining_credit = min(max_credit, max_burst);
1654     + queue->rate_limited = false;
1655     }
1656    
1657     void xenvif_tx_credit_callback(unsigned long data)
1658     @@ -685,8 +686,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1659     msecs_to_jiffies(queue->credit_usec / 1000);
1660    
1661     /* Timer could already be pending in rare cases. */
1662     - if (timer_pending(&queue->credit_timeout))
1663     + if (timer_pending(&queue->credit_timeout)) {
1664     + queue->rate_limited = true;
1665     return true;
1666     + }
1667    
1668     /* Passed the point where we can replenish credit? */
1669     if (time_after_eq64(now, next_credit)) {
1670     @@ -701,6 +704,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1671     mod_timer(&queue->credit_timeout,
1672     next_credit);
1673     queue->credit_window_start = next_credit;
1674     + queue->rate_limited = true;
1675    
1676     return true;
1677     }
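
The xen-netback hunks above thread a rate_limited flag through the TX credit scheduler: tx_credit_exceeded() sets it whenever the packet must wait for the credit timer, tx_add_credit() clears it when the timer replenishes credit, and xenvif_poll() skips rescheduling a rate-limited queue so the timer callback, not NAPI, wakes it. A condensed model of that control flow (plain integers in place of timers and NAPI):

#include <stdio.h>

struct queue {
	unsigned long remaining_credit;
	int timer_pending;
	int rate_limited;
};

/* Timer callback: top up credit and let NAPI scheduling resume. */
static void add_credit(struct queue *q, unsigned long credit)
{
	q->remaining_credit = credit;
	q->rate_limited = 0;
	q->timer_pending = 0;
}

/* Returns 1 when the packet must wait for more credit. */
static int credit_exceeded(struct queue *q, unsigned long size)
{
	if (size <= q->remaining_credit)
		return 0;
	if (!q->timer_pending)
		q->timer_pending = 1;   /* arm the replenish timer */
	q->rate_limited = 1;            /* timer will reschedule us */
	return 1;
}

static void poll(struct queue *q, unsigned long size)
{
	if (credit_exceeded(q, size)) {
		/* rate-limited: do NOT reschedule; the timer callback will */
		printf("throttled (credit=%lu)\n", q->remaining_credit);
		return;
	}
	q->remaining_credit -= size;
	printf("sent %lu (credit left %lu)\n", size, q->remaining_credit);
}

int main(void)
{
	struct queue q = { .remaining_credit = 100 };

	poll(&q, 80);        /* sent */
	poll(&q, 80);        /* throttled, rate_limited set */
	add_credit(&q, 100); /* timer fires, clears rate_limited */
	poll(&q, 80);        /* sent again */
	return 0;
}
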
1678     diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
1679     index ad33238cef17..8c4641b518b5 100644
1680     --- a/drivers/scsi/qla2xxx/qla_attr.c
1681     +++ b/drivers/scsi/qla2xxx/qla_attr.c
1682     @@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
1683     struct qla_hw_data *ha = vha->hw;
1684     ssize_t rval = 0;
1685    
1686     + mutex_lock(&ha->optrom_mutex);
1687     +
1688     if (ha->optrom_state != QLA_SREADING)
1689     - return 0;
1690     + goto out;
1691    
1692     - mutex_lock(&ha->optrom_mutex);
1693     rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
1694     ha->optrom_region_size);
1695     +
1696     +out:
1697     mutex_unlock(&ha->optrom_mutex);
1698    
1699     return rval;
1700     @@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
1701     struct device, kobj)));
1702     struct qla_hw_data *ha = vha->hw;
1703    
1704     - if (ha->optrom_state != QLA_SWRITING)
1705     + mutex_lock(&ha->optrom_mutex);
1706     +
1707     + if (ha->optrom_state != QLA_SWRITING) {
1708     + mutex_unlock(&ha->optrom_mutex);
1709     return -EINVAL;
1710     - if (off > ha->optrom_region_size)
1711     + }
1712     + if (off > ha->optrom_region_size) {
1713     + mutex_unlock(&ha->optrom_mutex);
1714     return -ERANGE;
1715     + }
1716     if (off + count > ha->optrom_region_size)
1717     count = ha->optrom_region_size - off;
1718    
1719     - mutex_lock(&ha->optrom_mutex);
1720     memcpy(&ha->optrom_buffer[off], buf, count);
1721     mutex_unlock(&ha->optrom_mutex);
1722    
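
Both qla2xxx sysfs handlers above used to test ha->optrom_state before taking optrom_mutex, leaving a window where the state and optrom_buffer could change between the check and the copy; the fix takes the mutex first and releases it on every early return, with the read side funneling through a single out label. The lock-then-check shape, modeled with a pthread mutex (names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

enum optrom_state { IDLE, READING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum optrom_state state = READING;
static char buffer[32] = "option rom contents";

static long read_optrom(char *dst, size_t len)
{
	long rval = 0;

	pthread_mutex_lock(&lock);      /* lock BEFORE inspecting state */

	if (state != READING)
		goto out;               /* early exit still unlocks */

	strncpy(dst, buffer, len - 1);
	dst[len - 1] = '\0';
	rval = (long)strlen(dst);
out:
	pthread_mutex_unlock(&lock);
	return rval;
}

int main(void)
{
	char out[32];

	printf("read %ld bytes\n", read_optrom(out, sizeof(out)));
	return 0;
}
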
1723     diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
1724     index 2b1456e5e221..c1eafbd7610a 100644
1725     --- a/drivers/spi/spi-axi-spi-engine.c
1726     +++ b/drivers/spi/spi-axi-spi-engine.c
1727     @@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
1728     SPI_ENGINE_VERSION_MAJOR(version),
1729     SPI_ENGINE_VERSION_MINOR(version),
1730     SPI_ENGINE_VERSION_PATCH(version));
1731     - return -ENODEV;
1732     + ret = -ENODEV;
1733     + goto err_put_master;
1734     }
1735    
1736     spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1737     diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
1738     index 6693d7c69f97..e8efb4299a95 100644
1739     --- a/drivers/target/iscsi/iscsi_target_nego.c
1740     +++ b/drivers/target/iscsi/iscsi_target_nego.c
1741     @@ -490,14 +490,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
1742    
1743     static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
1744    
1745     -static bool iscsi_target_sk_state_check(struct sock *sk)
1746     +static bool __iscsi_target_sk_check_close(struct sock *sk)
1747     {
1748     if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
1749     - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
1750     + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
1751     "returning FALSE\n");
1752     - return false;
1753     + return true;
1754     }
1755     - return true;
1756     + return false;
1757     +}
1758     +
1759     +static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
1760     +{
1761     + bool state = false;
1762     +
1763     + if (conn->sock) {
1764     + struct sock *sk = conn->sock->sk;
1765     +
1766     + read_lock_bh(&sk->sk_callback_lock);
1767     + state = (__iscsi_target_sk_check_close(sk) ||
1768     + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
1769     + read_unlock_bh(&sk->sk_callback_lock);
1770     + }
1771     + return state;
1772     +}
1773     +
1774     +static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
1775     +{
1776     + bool state = false;
1777     +
1778     + if (conn->sock) {
1779     + struct sock *sk = conn->sock->sk;
1780     +
1781     + read_lock_bh(&sk->sk_callback_lock);
1782     + state = test_bit(flag, &conn->login_flags);
1783     + read_unlock_bh(&sk->sk_callback_lock);
1784     + }
1785     + return state;
1786     +}
1787     +
1788     +static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
1789     +{
1790     + bool state = false;
1791     +
1792     + if (conn->sock) {
1793     + struct sock *sk = conn->sock->sk;
1794     +
1795     + write_lock_bh(&sk->sk_callback_lock);
1796     + state = (__iscsi_target_sk_check_close(sk) ||
1797     + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
1798     + if (!state)
1799     + clear_bit(flag, &conn->login_flags);
1800     + write_unlock_bh(&sk->sk_callback_lock);
1801     + }
1802     + return state;
1803     }
1804    
1805     static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
1806     @@ -537,6 +583,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1807    
1808     pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
1809     conn, current->comm, current->pid);
1810     + /*
1811     + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
1812     + * before initial PDU processing in iscsi_target_start_negotiation()
1813     + * has completed, go ahead and retry until it's cleared.
1814     + *
1815     + * Otherwise if the TCP connection drops while this is occurring,
1816     + * iscsi_target_start_negotiation() will detect the failure, call
1817     + * cancel_delayed_work_sync(&conn->login_work), and cleanup the
1818     + * remaining iscsi connection resources from iscsi_np process context.
1819     + */
1820     + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
1821     + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
1822     + return;
1823     + }
1824    
1825     spin_lock(&tpg->tpg_state_lock);
1826     state = (tpg->tpg_state == TPG_STATE_ACTIVE);
1827     @@ -544,26 +604,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1828    
1829     if (!state) {
1830     pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
1831     - iscsi_target_restore_sock_callbacks(conn);
1832     - iscsi_target_login_drop(conn, login);
1833     - iscsit_deaccess_np(np, tpg, tpg_np);
1834     - return;
1835     + goto err;
1836     }
1837    
1838     - if (conn->sock) {
1839     - struct sock *sk = conn->sock->sk;
1840     -
1841     - read_lock_bh(&sk->sk_callback_lock);
1842     - state = iscsi_target_sk_state_check(sk);
1843     - read_unlock_bh(&sk->sk_callback_lock);
1844     -
1845     - if (!state) {
1846     - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
1847     - iscsi_target_restore_sock_callbacks(conn);
1848     - iscsi_target_login_drop(conn, login);
1849     - iscsit_deaccess_np(np, tpg, tpg_np);
1850     - return;
1851     - }
1852     + if (iscsi_target_sk_check_close(conn)) {
1853     + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
1854     + goto err;
1855     }
1856    
1857     conn->login_kworker = current;
1858     @@ -581,34 +627,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
1859     flush_signals(current);
1860     conn->login_kworker = NULL;
1861    
1862     - if (rc < 0) {
1863     - iscsi_target_restore_sock_callbacks(conn);
1864     - iscsi_target_login_drop(conn, login);
1865     - iscsit_deaccess_np(np, tpg, tpg_np);
1866     - return;
1867     - }
1868     + if (rc < 0)
1869     + goto err;
1870    
1871     pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
1872     conn, current->comm, current->pid);
1873    
1874     rc = iscsi_target_do_login(conn, login);
1875     if (rc < 0) {
1876     - iscsi_target_restore_sock_callbacks(conn);
1877     - iscsi_target_login_drop(conn, login);
1878     - iscsit_deaccess_np(np, tpg, tpg_np);
1879     + goto err;
1880     } else if (!rc) {
1881     - if (conn->sock) {
1882     - struct sock *sk = conn->sock->sk;
1883     -
1884     - write_lock_bh(&sk->sk_callback_lock);
1885     - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
1886     - write_unlock_bh(&sk->sk_callback_lock);
1887     - }
1888     + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
1889     + goto err;
1890     } else if (rc == 1) {
1891     iscsi_target_nego_release(conn);
1892     iscsi_post_login_handler(np, conn, zero_tsih);
1893     iscsit_deaccess_np(np, tpg, tpg_np);
1894     }
1895     + return;
1896     +
1897     +err:
1898     + iscsi_target_restore_sock_callbacks(conn);
1899     + iscsi_target_login_drop(conn, login);
1900     + iscsit_deaccess_np(np, tpg, tpg_np);
1901     }
1902    
1903     static void iscsi_target_do_cleanup(struct work_struct *work)
1904     @@ -656,31 +697,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
1905     orig_state_change(sk);
1906     return;
1907     }
1908     + state = __iscsi_target_sk_check_close(sk);
1909     + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
1910     +
1911     if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
1912     pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
1913     " conn: %p\n", conn);
1914     + if (state)
1915     + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
1916     write_unlock_bh(&sk->sk_callback_lock);
1917     orig_state_change(sk);
1918     return;
1919     }
1920     - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
1921     + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
1922     pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
1923     conn);
1924     write_unlock_bh(&sk->sk_callback_lock);
1925     orig_state_change(sk);
1926     return;
1927     }
1928     + /*
1929     + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
1930     + * but only queue conn->login_work -> iscsi_target_do_login_rx()
1931     + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
1932     + *
1933     + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
1934     + * will detect the dropped TCP connection from delayed workqueue context.
1935     + *
1936     + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
1937     + * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
1938     + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
1939     + * via iscsi_target_sk_check_and_clear() is responsible for detecting the
1940     + * dropped TCP connection in iscsi_np process context, and cleaning up
1941     + * the remaining iscsi connection resources.
1942     + */
1943     + if (state) {
1944     + pr_debug("iscsi_target_sk_state_change got failed state\n");
1945     + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
1946     + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
1947     + write_unlock_bh(&sk->sk_callback_lock);
1948    
1949     - state = iscsi_target_sk_state_check(sk);
1950     - write_unlock_bh(&sk->sk_callback_lock);
1951     -
1952     - pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
1953     + orig_state_change(sk);
1954    
1955     - if (!state) {
1956     - pr_debug("iscsi_target_sk_state_change got failed state\n");
1957     - schedule_delayed_work(&conn->login_cleanup_work, 0);
1958     + if (!state)
1959     + schedule_delayed_work(&conn->login_work, 0);
1960     return;
1961     }
1962     + write_unlock_bh(&sk->sk_callback_lock);
1963     +
1964     orig_state_change(sk);
1965     }
1966    
1967     @@ -945,6 +1009,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
1968     if (iscsi_target_handle_csg_one(conn, login) < 0)
1969     return -1;
1970     if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
1971     + /*
1972     + * Check to make sure the TCP connection has not
1973     + * dropped asynchronously while session reinstatement
1974     + * was occurring in this kthread context, before
1975     + * transitioning to full feature phase operation.
1976     + */
1977     + if (iscsi_target_sk_check_close(conn))
1978     + return -1;
1979     +
1980     login->tsih = conn->sess->tsih;
1981     login->login_complete = 1;
1982     iscsi_target_restore_sock_callbacks(conn);
1983     @@ -971,21 +1044,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
1984     break;
1985     }
1986    
1987     - if (conn->sock) {
1988     - struct sock *sk = conn->sock->sk;
1989     - bool state;
1990     -
1991     - read_lock_bh(&sk->sk_callback_lock);
1992     - state = iscsi_target_sk_state_check(sk);
1993     - read_unlock_bh(&sk->sk_callback_lock);
1994     -
1995     - if (!state) {
1996     - pr_debug("iscsi_target_do_login() failed state for"
1997     - " conn: %p\n", conn);
1998     - return -1;
1999     - }
2000     - }
2001     -
2002     return 0;
2003     }
2004    
2005     @@ -1252,13 +1310,25 @@ int iscsi_target_start_negotiation(
2006     if (conn->sock) {
2007     struct sock *sk = conn->sock->sk;
2008    
2009     - write_lock_bh(&sk->sk_callback_lock);
2010     - set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
2011     - write_unlock_bh(&sk->sk_callback_lock);
2012     - }
2013     + write_lock_bh(&sk->sk_callback_lock);
2014     + set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
2015     + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
2016     + write_unlock_bh(&sk->sk_callback_lock);
2017     + }
2018     + /*
2019     + * If iscsi_target_do_login returns zero to signal more PDU
2020     + * exchanges are required to complete the login, go ahead and
2021     + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
2022     + * is still active.
2023     + *
2024     + * Otherwise if TCP connection dropped asynchronously, go ahead
2025     + * and perform connection cleanup now.
2026     + */
2027     + ret = iscsi_target_do_login(conn, login);
2028     + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
2029     + ret = -1;
2030    
2031     - ret = iscsi_target_do_login(conn, login);
2032     - if (ret < 0) {
2033     + if (ret < 0) {
2034     cancel_delayed_work_sync(&conn->login_work);
2035     cancel_delayed_work_sync(&conn->login_cleanup_work);
2036     iscsi_target_restore_sock_callbacks(conn);
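
The iscsi login rework above funnels all sk_callback_lock handling through three helpers; the key one, iscsi_target_sk_check_and_clear(), clears a login flag only while the connection is still alive, and performs the closed-check and the clear inside one write-locked section so ->sk_state_change() cannot interleave between them. A sketch of that helper with a pthread mutex standing in for sk_callback_lock:

#include <pthread.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t lock;   /* stands in for sk->sk_callback_lock */
	int closed;             /* stands in for LOGIN_FLAGS_CLOSED / TCP state */
	unsigned long flags;
};

#define FLAG_INITIAL_PDU (1UL << 3)

/*
 * Clear @flag only if the connection is still open; report whether it
 * was already closed. Check and clear happen in one critical section.
 */
static int check_and_clear(struct conn *c, unsigned long flag)
{
	int closed;

	pthread_mutex_lock(&c->lock);
	closed = c->closed;
	if (!closed)
		c->flags &= ~flag;
	pthread_mutex_unlock(&c->lock);
	return closed;
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, 0, FLAG_INITIAL_PDU };

	if (check_and_clear(&c, FLAG_INITIAL_PDU))
		puts("connection dropped: caller must clean up");
	else
		printf("flag cleared, flags now %#lx\n", c.flags);
	return 0;
}
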
2037     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2038     index 14a37ff0b9e3..705bb5f5a87f 100644
2039     --- a/fs/btrfs/extent-tree.c
2040     +++ b/fs/btrfs/extent-tree.c
2041     @@ -4759,10 +4759,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
2042     else
2043     flush = BTRFS_RESERVE_NO_FLUSH;
2044     spin_lock(&space_info->lock);
2045     - if (can_overcommit(root, space_info, orig, flush)) {
2046     - spin_unlock(&space_info->lock);
2047     - break;
2048     - }
2049     if (list_empty(&space_info->tickets) &&
2050     list_empty(&space_info->priority_tickets)) {
2051     spin_unlock(&space_info->lock);
2052     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
2053     index 9e77c089e8cb..d17d12ed6f73 100644
2054     --- a/fs/ext4/file.c
2055     +++ b/fs/ext4/file.c
2056     @@ -469,6 +469,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2057     lastoff = page_offset(page);
2058     bh = head = page_buffers(page);
2059     do {
2060     + if (lastoff + bh->b_size <= startoff)
2061     + goto next;
2062     if (buffer_uptodate(bh) ||
2063     buffer_unwritten(bh)) {
2064     if (whence == SEEK_DATA)
2065     @@ -483,6 +485,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
2066     unlock_page(page);
2067     goto out;
2068     }
2069     +next:
2070     lastoff += bh->b_size;
2071     bh = bh->b_this_page;
2072     } while (bh != head);
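
The two added ext4 lines skip buffer heads that end at or before the search offset, so ext4_find_unwritten_pgoff() can no longer report a SEEK_DATA/SEEK_HOLE position that lies before the offset the caller asked for. The skip condition in isolation, over a toy block map:

#include <stdio.h>

#define BLK 512UL

/* Return the offset of the first "data" block at or after startoff. */
static long find_data(const int *uptodate, int nblocks, unsigned long startoff)
{
	unsigned long lastoff = 0;
	int i;

	for (i = 0; i < nblocks; i++, lastoff += BLK) {
		/* block ends before the search start: never report it */
		if (lastoff + BLK <= startoff)
			continue;
		if (uptodate[i])
			return (long)(lastoff > startoff ? lastoff : startoff);
	}
	return -1;
}

int main(void)
{
	int uptodate[4] = { 1, 0, 1, 0 };   /* data, hole, data, hole */

	/* Searching from 600 must skip block 0 even though it holds data */
	printf("%ld\n", find_data(uptodate, 4, 600));  /* prints 1024 */
	return 0;
}
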
2073     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2074     index cf681004b196..95bf46654153 100644
2075     --- a/fs/ext4/resize.c
2076     +++ b/fs/ext4/resize.c
2077     @@ -1926,7 +1926,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
2078     n_desc_blocks = o_desc_blocks +
2079     le16_to_cpu(es->s_reserved_gdt_blocks);
2080     n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
2081     - n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
2082     + n_blocks_count = (ext4_fsblk_t)n_group *
2083     + EXT4_BLOCKS_PER_GROUP(sb);
2084     n_group--; /* set to last group number */
2085     }
2086    
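
In ext4_resize_fs() both n_group and EXT4_BLOCKS_PER_GROUP() are 32-bit quantities, so their product was computed in 32 bits and silently truncated before landing in the 64-bit n_blocks_count; casting one operand to ext4_fsblk_t forces the multiply to happen in 64 bits. The promotion rule, demonstrated:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t n_group = 1u << 20;          /* 1Mi groups */
	uint32_t per_group = 1u << 15;        /* 32768 blocks per group */

	uint64_t wrong = n_group * per_group;            /* 32-bit multiply, wraps to 0 */
	uint64_t right = (uint64_t)n_group * per_group;  /* 64-bit multiply */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
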
2087     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2088     index 7e0c002c12e9..eb20b8767f3c 100644
2089     --- a/fs/f2fs/super.c
2090     +++ b/fs/f2fs/super.c
2091     @@ -1424,6 +1424,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2092     unsigned int total, fsmeta;
2093     struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2094     struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2095     + unsigned int main_segs, blocks_per_seg;
2096     + int i;
2097    
2098     total = le32_to_cpu(raw_super->segment_count);
2099     fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2100     @@ -1435,6 +1437,20 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
2101     if (unlikely(fsmeta >= total))
2102     return 1;
2103    
2104     + main_segs = le32_to_cpu(raw_super->segment_count_main);
2105     + blocks_per_seg = sbi->blocks_per_seg;
2106     +
2107     + for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2108     + if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
2109     + le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
2110     + return 1;
2111     + }
2112     + for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
2113     + if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
2114     + le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
2115     + return 1;
2116     + }
2117     +
2118     if (unlikely(f2fs_cp_error(sbi))) {
2119     f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
2120     return 1;
2121     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2122     index 46ca7881d80d..a53b8e0c896a 100644
2123     --- a/fs/nfs/nfs4proc.c
2124     +++ b/fs/nfs/nfs4proc.c
2125     @@ -7410,7 +7410,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
2126     cdata->res.server_scope = NULL;
2127     }
2128     /* Save the EXCHANGE_ID verifier session trunk tests */
2129     - memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
2130     + memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
2131     sizeof(clp->cl_confirm.data));
2132     }
2133     out:
2134     @@ -7447,7 +7447,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
2135     static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2136     u32 sp4_how, struct rpc_xprt *xprt)
2137     {
2138     - nfs4_verifier verifier;
2139     struct rpc_message msg = {
2140     .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
2141     .rpc_cred = cred,
2142     @@ -7470,8 +7469,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2143     if (!calldata)
2144     goto out;
2145    
2146     - if (!xprt)
2147     - nfs4_init_boot_verifier(clp, &verifier);
2148     + nfs4_init_boot_verifier(clp, &calldata->args.verifier);
2149    
2150     status = nfs4_init_uniform_client_string(clp);
2151     if (status)
2152     @@ -7516,9 +7514,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
2153     task_setup_data.rpc_xprt = xprt;
2154     task_setup_data.flags =
2155     RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
2156     - calldata->args.verifier = &clp->cl_confirm;
2157     - } else {
2158     - calldata->args.verifier = &verifier;
2159     + memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
2160     + sizeof(calldata->args.verifier.data));
2161     }
2162     calldata->args.client = clp;
2163     #ifdef CONFIG_NFS_V4_1_MIGRATION
2164     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
2165     index c9c4d9855976..5e2724a928ed 100644
2166     --- a/fs/nfs/nfs4xdr.c
2167     +++ b/fs/nfs/nfs4xdr.c
2168     @@ -1761,7 +1761,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
2169     int len = 0;
2170    
2171     encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
2172     - encode_nfs4_verifier(xdr, args->verifier);
2173     + encode_nfs4_verifier(xdr, &args->verifier);
2174    
2175     encode_string(xdr, strlen(args->client->cl_owner_id),
2176     args->client->cl_owner_id);
2177     diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
2178     index bfc204e70338..cd32a49ae81e 100644
2179     --- a/include/linux/cpuset.h
2180     +++ b/include/linux/cpuset.h
2181     @@ -16,6 +16,19 @@
2182    
2183     #ifdef CONFIG_CPUSETS
2184    
2185     +/*
2186     + * Static branch rewrites can happen in an arbitrary order for a given
2187     + * key. In code paths where we need to loop with read_mems_allowed_begin() and
2188     + * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
2189     + * to ensure that begin() always gets rewritten before retry() in the
2190     + * disabled -> enabled transition. If not, then if local irqs are disabled
2191     + * around the loop, we can deadlock since retry() would always be
2192     + * comparing the latest value of the mems_allowed seqcount against 0 as
2193     + * begin() still would see cpusets_enabled() as false. The enabled -> disabled
2194     + * transition should happen in reverse order for the same reasons (want to stop
2195     + * looking at real value of mems_allowed.sequence in retry() first).
2196     + */
2197     +extern struct static_key_false cpusets_pre_enable_key;
2198     extern struct static_key_false cpusets_enabled_key;
2199     static inline bool cpusets_enabled(void)
2200     {
2201     @@ -30,12 +43,14 @@ static inline int nr_cpusets(void)
2202    
2203     static inline void cpuset_inc(void)
2204     {
2205     + static_branch_inc(&cpusets_pre_enable_key);
2206     static_branch_inc(&cpusets_enabled_key);
2207     }
2208    
2209     static inline void cpuset_dec(void)
2210     {
2211     static_branch_dec(&cpusets_enabled_key);
2212     + static_branch_dec(&cpusets_pre_enable_key);
2213     }
2214    
2215     extern int cpuset_init(void);
2216     @@ -113,7 +128,7 @@ extern void cpuset_print_current_mems_allowed(void);
2217     */
2218     static inline unsigned int read_mems_allowed_begin(void)
2219     {
2220     - if (!cpusets_enabled())
2221     + if (!static_branch_unlikely(&cpusets_pre_enable_key))
2222     return 0;
2223    
2224     return read_seqcount_begin(&current->mems_allowed_seq);
2225     @@ -127,7 +142,7 @@ static inline unsigned int read_mems_allowed_begin(void)
2226     */
2227     static inline bool read_mems_allowed_retry(unsigned int seq)
2228     {
2229     - if (!cpusets_enabled())
2230     + if (!static_branch_unlikely(&cpusets_enabled_key))
2231     return false;
2232    
2233     return read_seqcount_retry(&current->mems_allowed_seq, seq);
2234     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2235     index 08d947fc4c59..e8471c2ca83a 100644
2236     --- a/include/linux/mm_types.h
2237     +++ b/include/linux/mm_types.h
2238     @@ -507,6 +507,10 @@ struct mm_struct {
2239     * PROT_NONE or PROT_NUMA mapped page.
2240     */
2241     bool tlb_flush_pending;
2242     +#endif
2243     +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2244     + /* See flush_tlb_batched_pending() */
2245     + bool tlb_flush_batched;
2246     #endif
2247     struct uprobes_state uprobes_state;
2248     #ifdef CONFIG_X86_INTEL_MPX
2249     diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
2250     index beb1e10f446e..3bf867a0c3b3 100644
2251     --- a/include/linux/nfs_xdr.h
2252     +++ b/include/linux/nfs_xdr.h
2253     @@ -1199,7 +1199,7 @@ struct nfs41_state_protection {
2254    
2255     struct nfs41_exchange_id_args {
2256     struct nfs_client *client;
2257     - nfs4_verifier *verifier;
2258     + nfs4_verifier verifier;
2259     u32 flags;
2260     struct nfs41_state_protection state_protect;
2261     };
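
The NFS hunks above embed the nfs4_verifier in nfs41_exchange_id_args instead of pointing at it: the EXCHANGE_ID task runs with RPC_TASK_ASYNC, so a pointer to the on-stack verifier in _nfs4_proc_exchange_id() could dangle once that function returned. A minimal illustration of why the embedded copy is safe and the pointer was not (names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct verifier { char data[8]; };

/* Heap-allocated argument block that outlives the submitting function,
 * like the calldata of an async RPC. */
struct call_args {
	struct verifier verifier;   /* embedded copy: safe */
};

static struct call_args *submit(const struct verifier *v)
{
	struct call_args *args = malloc(sizeof(*args));

	if (!args)
		return NULL;
	/* Copy the caller's verifier instead of storing a pointer to it;
	 * a pointer would dangle once the caller's frame is gone. */
	memcpy(&args->verifier, v, sizeof(args->verifier));
	return args;
}

static struct call_args *start(void)
{
	struct verifier local = { "boot1234" };  /* dies with this frame */

	return submit(&local);
}

int main(void)
{
	struct call_args *args = start();

	if (!args)
		return 1;
	printf("verifier survives: %.8s\n", args->verifier.data);
	free(args);
	return 0;
}
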
2262     diff --git a/include/linux/property.h b/include/linux/property.h
2263     index 856e50b2140c..338f9b76914b 100644
2264     --- a/include/linux/property.h
2265     +++ b/include/linux/property.h
2266     @@ -33,6 +33,8 @@ enum dev_dma_attr {
2267     DEV_DMA_COHERENT,
2268     };
2269    
2270     +struct fwnode_handle *dev_fwnode(struct device *dev);
2271     +
2272     bool device_property_present(struct device *dev, const char *propname);
2273     int device_property_read_u8_array(struct device *dev, const char *propname,
2274     u8 *val, size_t nval);
2275     diff --git a/include/linux/sched.h b/include/linux/sched.h
2276     index f425eb3318ab..14f58cf06054 100644
2277     --- a/include/linux/sched.h
2278     +++ b/include/linux/sched.h
2279     @@ -830,6 +830,16 @@ struct signal_struct {
2280    
2281     #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
2282    
2283     +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
2284     + SIGNAL_STOP_CONTINUED)
2285     +
2286     +static inline void signal_set_stop_flags(struct signal_struct *sig,
2287     + unsigned int flags)
2288     +{
2289     + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
2290     + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
2291     +}
2292     +
2293     /* If true, all threads except ->group_exit_task have pending SIGKILL */
2294     static inline int signal_group_exit(const struct signal_struct *sig)
2295     {
2296     diff --git a/include/linux/slab.h b/include/linux/slab.h
2297     index 084b12bad198..4c5363566815 100644
2298     --- a/include/linux/slab.h
2299     +++ b/include/linux/slab.h
2300     @@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
2301     * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
2302     */
2303     #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
2304     -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
2305     +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
2306     #ifndef KMALLOC_SHIFT_LOW
2307     #define KMALLOC_SHIFT_LOW 3
2308     #endif
2309     @@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
2310     * be allocated from the same page.
2311     */
2312     #define KMALLOC_SHIFT_HIGH PAGE_SHIFT
2313     -#define KMALLOC_SHIFT_MAX 30
2314     +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
2315     #ifndef KMALLOC_SHIFT_LOW
2316     #define KMALLOC_SHIFT_LOW 3
2317     #endif
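
The slab.h change reflects that the buddy allocator's largest block is order MAX_ORDER - 1, i.e. 2^(MAX_ORDER - 1) pages, so the old MAX_ORDER + PAGE_SHIFT overstated the largest kmalloc allocation by a factor of two (and SLOB's hard-coded 30 could disagree with it entirely). With common x86-64 values the corrected limit works out as follows:

#include <stdio.h>

int main(void)
{
	int MAX_ORDER = 11, PAGE_SHIFT = 12;   /* common x86-64 values */

	/* largest buddy block: order MAX_ORDER-1 = 2^10 pages = 4 MiB */
	int kmalloc_shift_max = MAX_ORDER + PAGE_SHIFT - 1;   /* 22 */

	printf("KMALLOC_MAX_SIZE = %lu bytes\n", 1UL << kmalloc_shift_max);
	return 0;
}
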
2318     diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
2319     index fc6e22186405..733a21ef8da4 100644
2320     --- a/include/linux/workqueue.h
2321     +++ b/include/linux/workqueue.h
2322     @@ -311,6 +311,7 @@ enum {
2323    
2324     __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
2325     __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
1326     + __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
2327     __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
2328    
2329     WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
2330     @@ -409,7 +410,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
2331     * Pointer to the allocated workqueue on success, %NULL on failure.
2332     */
2333     #define alloc_ordered_workqueue(fmt, flags, args...) \
2334     - alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
2335     + alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
2336     + __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
2337    
2338     #define create_workqueue(name) \
2339     alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
2340     diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
2341     index e0f4109e64c6..c2aa73e5e6bb 100644
2342     --- a/include/net/iw_handler.h
2343     +++ b/include/net/iw_handler.h
2344     @@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
2345     memcpy(stream + lcp_len,
2346     ((char *) &iwe->u) + IW_EV_POINT_OFF,
2347     IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
2348     - memcpy(stream + point_len, extra, iwe->u.data.length);
2349     + if (iwe->u.data.length && extra)
2350     + memcpy(stream + point_len, extra, iwe->u.data.length);
2351     stream += event_len;
2352     }
2353     return stream;
2354     diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
2355     index 31acc3f4f132..61d9ce89d10d 100644
2356     --- a/include/net/sctp/sctp.h
2357     +++ b/include/net/sctp/sctp.h
2358     @@ -460,6 +460,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
2359    
2360     #define _sctp_walk_params(pos, chunk, end, member)\
2361     for (pos.v = chunk->member;\
2362     + (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
2363     + (void *)chunk + end) &&\
2364     pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
2365     ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
2366     pos.v += SCTP_PAD4(ntohs(pos.p->length)))
2367     @@ -470,6 +472,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
2368     #define _sctp_walk_errors(err, chunk_hdr, end)\
2369     for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
2370     sizeof(sctp_chunkhdr_t));\
2371     + ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
2372     + (void *)chunk_hdr + end) &&\
2373     (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
2374     ntohs(err->length) >= sizeof(sctp_errhdr_t); \
2375     err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
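
The extra clause in each SCTP walker checks that the parameter header itself, through its length field, lies inside the chunk before ntohs(pos.p->length) is evaluated; previously a truncated chunk could make the macro read its own bound from past the end of the buffer. The same two-stage TLV walk in plain C (simplified, not the sctp_paramhdr layout):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tlv_hdr { uint16_t type; uint16_t length; }; /* length includes header */

static void walk(const unsigned char *buf, size_t len)
{
	const unsigned char *pos = buf, *end = buf + len;

	/* stage 1 (loop condition): the header fits, so reading the
	 * length field below cannot overrun the buffer */
	while ((size_t)(end - pos) >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr h;
		size_t step;

		memcpy(&h, pos, sizeof(h));
		h.length = ntohs(h.length);

		/* stage 2: declared length is sane and fits in the buffer */
		if (h.length < sizeof(h) || h.length > (size_t)(end - pos))
			break;

		printf("param type %u, %u bytes\n", ntohs(h.type), h.length);

		step = (h.length + 3u) & ~3u;   /* SCTP_PAD4-style rounding */
		if (step > (size_t)(end - pos))
			break;
		pos += step;
	}
}

int main(void)
{
	/* one valid 8-byte parameter followed by a truncated header */
	unsigned char buf[10] = { 0, 1, 0, 8, 'd', 'a', 't', 'a', 0, 2 };

	walk(buf, sizeof(buf));   /* prints only the first parameter */
	return 0;
}
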
2376     diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
2377     index 33b2e75bf2eb..c8132b419148 100644
2378     --- a/include/target/iscsi/iscsi_target_core.h
2379     +++ b/include/target/iscsi/iscsi_target_core.h
2380     @@ -563,6 +563,7 @@ struct iscsi_conn {
2381     #define LOGIN_FLAGS_READ_ACTIVE 1
2382     #define LOGIN_FLAGS_CLOSED 2
2383     #define LOGIN_FLAGS_READY 4
2384     +#define LOGIN_FLAGS_INITIAL_PDU 8
2385     unsigned long login_flags;
2386     struct delayed_work login_work;
2387     struct delayed_work login_cleanup_work;
2388     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2389     index 1fde8eec9529..4c233437ee1a 100644
2390     --- a/kernel/cgroup.c
2391     +++ b/kernel/cgroup.c
2392     @@ -3487,11 +3487,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2393     cgrp->subtree_control &= ~disable;
2394    
2395     ret = cgroup_apply_control(cgrp);
2396     -
2397     cgroup_finalize_control(cgrp, ret);
2398     + if (ret)
2399     + goto out_unlock;
2400    
2401     kernfs_activate(cgrp->kn);
2402     - ret = 0;
2403     out_unlock:
2404     cgroup_kn_unlock(of->kn);
2405     return ret ?: nbytes;
2406     @@ -5718,6 +5718,10 @@ int __init cgroup_init(void)
2407    
2408     if (ss->bind)
2409     ss->bind(init_css_set.subsys[ssid]);
2410     +
2411     + mutex_lock(&cgroup_mutex);
2412     + css_populate_dir(init_css_set.subsys[ssid]);
2413     + mutex_unlock(&cgroup_mutex);
2414     }
2415    
2416     /* init_css_set.subsys[] has been updated, re-hash */
2417     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2418     index 24d175d2b62d..247afb108343 100644
2419     --- a/kernel/cpuset.c
2420     +++ b/kernel/cpuset.c
2421     @@ -61,6 +61,7 @@
2422     #include <linux/cgroup.h>
2423     #include <linux/wait.h>
2424    
2425     +DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
2426     DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
2427    
2428     /* See "Frequency meter" comments, below. */
2429     diff --git a/kernel/signal.c b/kernel/signal.c
2430     index deb04d5983ed..e48668c3c972 100644
2431     --- a/kernel/signal.c
2432     +++ b/kernel/signal.c
2433     @@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
2434     * fresh group stop. Read comment in do_signal_stop() for details.
2435     */
2436     if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
2437     - sig->flags = SIGNAL_STOP_STOPPED;
2438     + signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
2439     return true;
2440     }
2441     return false;
2442     @@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
2443     * will take ->siglock, notice SIGNAL_CLD_MASK, and
2444     * notify its parent. See get_signal_to_deliver().
2445     */
2446     - signal->flags = why | SIGNAL_STOP_CONTINUED;
2447     + signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
2448     signal->group_stop_count = 0;
2449     signal->group_exit_code = 0;
2450     }
2451     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
2452     index c611c47de884..944ad64277a6 100644
2453     --- a/kernel/time/timer.c
2454     +++ b/kernel/time/timer.c
2455     @@ -1536,7 +1536,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
2456     base->is_idle = false;
2457     } else {
2458     if (!is_max_delta)
2459     - expires = basem + (nextevt - basej) * TICK_NSEC;
2460     + expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
2461     /*
2462     * If we expect to sleep more than a tick, mark the base idle:
2463     */
2464     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2465     index 479d840db286..776dda02e751 100644
2466     --- a/kernel/workqueue.c
2467     +++ b/kernel/workqueue.c
2468     @@ -3730,8 +3730,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
2469     return -EINVAL;
2470    
2471     /* creating multiple pwqs breaks ordering guarantee */
2472     - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
2473     - return -EINVAL;
2474     + if (!list_empty(&wq->pwqs)) {
2475     + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2476     + return -EINVAL;
2477     +
2478     + wq->flags &= ~__WQ_ORDERED;
2479     + }
2480    
2481     ctx = apply_wqattrs_prepare(wq, attrs);
2482     if (!ctx)
2483     @@ -3915,6 +3919,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
2484     struct workqueue_struct *wq;
2485     struct pool_workqueue *pwq;
2486    
2487     + /*
2488     + * Unbound && max_active == 1 used to imply ordered, which is no
2489     + * longer the case on NUMA machines due to per-node pools. While
2490     + * alloc_ordered_workqueue() is the right way to create an ordered
2491     + * workqueue, keep the previous behavior to avoid subtle breakages
2492     + * on NUMA.
2493     + */
2494     + if ((flags & WQ_UNBOUND) && max_active == 1)
2495     + flags |= __WQ_ORDERED;
2496     +
2497     /* see the comment above the definition of WQ_POWER_EFFICIENT */
2498     if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
2499     flags |= WQ_UNBOUND;
2500     @@ -4103,13 +4117,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2501     struct pool_workqueue *pwq;
2502    
2503     /* disallow meddling with max_active for ordered workqueues */
2504     - if (WARN_ON(wq->flags & __WQ_ORDERED))
2505     + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2506     return;
2507    
2508     max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2509    
2510     mutex_lock(&wq->mutex);
2511    
2512     + wq->flags &= ~__WQ_ORDERED;
2513     wq->saved_max_active = max_active;
2514    
2515     for_each_pwq(pwq, wq)
2516     @@ -5214,7 +5229,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
2517     * attributes breaks ordering guarantee. Disallow exposing ordered
2518     * workqueues.
2519     */
2520     - if (WARN_ON(wq->flags & __WQ_ORDERED))
2521     + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2522     return -EINVAL;
2523    
2524     wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
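
Taken together, the workqueue hunks split "ordered" into an implicit attribute (unbound with max_active == 1, still set in __alloc_workqueue_key() for backward compatibility) and an explicit one (__WQ_ORDERED_EXPLICIT, set only by alloc_ordered_workqueue()); attribute changes now fail only for explicitly ordered workqueues and quietly drop the implicit bit otherwise. The flag logic in isolation:

#include <stdio.h>

#define WQ_UNBOUND            (1u << 1)
#define __WQ_ORDERED          (1u << 17)
#define __WQ_ORDERED_EXPLICIT (1u << 19)

/* Changing attributes breaks ordering: allowed only if ordering was
 * merely implied, in which case the implicit bit is dropped. */
static int apply_attrs(unsigned int *flags)
{
	if (*flags & __WQ_ORDERED_EXPLICIT)
		return -1;                    /* -EINVAL in the kernel */
	*flags &= ~__WQ_ORDERED;
	return 0;
}

int main(void)
{
	/* unbound + max_active==1 used to imply ordered */
	unsigned int implicit = WQ_UNBOUND | __WQ_ORDERED;
	unsigned int explicit_wq = WQ_UNBOUND | __WQ_ORDERED |
				   __WQ_ORDERED_EXPLICIT;

	printf("implicit: %d (flags now %#x)\n",
	       apply_attrs(&implicit), implicit);
	printf("explicit: %d\n", apply_attrs(&explicit_wq));
	return 0;
}
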
2525     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2526     index a6c8db1d62f6..f60e67217f18 100644
2527     --- a/lib/Kconfig.debug
2528     +++ b/lib/Kconfig.debug
2529     @@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED
2530    
2531     config DEBUG_INFO_SPLIT
2532     bool "Produce split debuginfo in .dwo files"
2533     - depends on DEBUG_INFO
2534     + depends on DEBUG_INFO && !FRV
2535     help
2536     Generate debug info into separate .dwo files. This significantly
2537     reduces the build directory size for builds with DEBUG_INFO,
2538     diff --git a/mm/internal.h b/mm/internal.h
2539     index 537ac9951f5f..34a5459e5989 100644
2540     --- a/mm/internal.h
2541     +++ b/mm/internal.h
2542     @@ -472,6 +472,7 @@ struct tlbflush_unmap_batch;
2543     #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2544     void try_to_unmap_flush(void);
2545     void try_to_unmap_flush_dirty(void);
2546     +void flush_tlb_batched_pending(struct mm_struct *mm);
2547     #else
2548     static inline void try_to_unmap_flush(void)
2549     {
2550     @@ -479,7 +480,9 @@ static inline void try_to_unmap_flush(void)
2551     static inline void try_to_unmap_flush_dirty(void)
2552     {
2553     }
2554     -
2555     +static inline void flush_tlb_batched_pending(struct mm_struct *mm)
2556     +{
2557     +}
2558     #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2559    
2560     extern const struct trace_print_flags pageflag_names[];
2561     diff --git a/mm/madvise.c b/mm/madvise.c
2562     index 93fb63e88b5e..253b1533fba5 100644
2563     --- a/mm/madvise.c
2564     +++ b/mm/madvise.c
2565     @@ -21,6 +21,7 @@
2566     #include <linux/swap.h>
2567     #include <linux/swapops.h>
2568     #include <linux/mmu_notifier.h>
2569     +#include "internal.h"
2570    
2571     #include <asm/tlb.h>
2572    
2573     @@ -282,6 +283,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
2574     return 0;
2575    
2576     orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2577     + flush_tlb_batched_pending(mm);
2578     arch_enter_lazy_mmu_mode();
2579     for (; addr != end; pte++, addr += PAGE_SIZE) {
2580     ptent = *pte;
2581     diff --git a/mm/memory.c b/mm/memory.c
2582     index e6a5a1f20492..9bf3da0d0e14 100644
2583     --- a/mm/memory.c
2584     +++ b/mm/memory.c
2585     @@ -1124,6 +1124,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
2586     init_rss_vec(rss);
2587     start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2588     pte = start_pte;
2589     + flush_tlb_batched_pending(mm);
2590     arch_enter_lazy_mmu_mode();
2591     do {
2592     pte_t ptent = *pte;
2593     diff --git a/mm/mprotect.c b/mm/mprotect.c
2594     index 11936526b08b..ae740c9b1f9b 100644
2595     --- a/mm/mprotect.c
2596     +++ b/mm/mprotect.c
2597     @@ -74,6 +74,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2598     if (!pte)
2599     return 0;
2600    
2601     + flush_tlb_batched_pending(vma->vm_mm);
2602     arch_enter_lazy_mmu_mode();
2603     do {
2604     oldpte = *pte;
2605     diff --git a/mm/mremap.c b/mm/mremap.c
2606     index 30d7d2482eea..15976716dd40 100644
2607     --- a/mm/mremap.c
2608     +++ b/mm/mremap.c
2609     @@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
2610     new_ptl = pte_lockptr(mm, new_pmd);
2611     if (new_ptl != old_ptl)
2612     spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2613     + flush_tlb_batched_pending(vma->vm_mm);
2614     arch_enter_lazy_mmu_mode();
2615    
2616     for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
2617     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2618     index 56df8c24689d..77b797c2d094 100644
2619     --- a/mm/page_alloc.c
2620     +++ b/mm/page_alloc.c
2621     @@ -1875,14 +1875,14 @@ int move_freepages(struct zone *zone,
2622     #endif
2623    
2624     for (page = start_page; page <= end_page;) {
2625     - /* Make sure we are not inadvertently changing nodes */
2626     - VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2627     -
2628     if (!pfn_valid_within(page_to_pfn(page))) {
2629     page++;
2630     continue;
2631     }
2632    
2633     + /* Make sure we are not inadvertently changing nodes */
2634     + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2635     +
2636     if (!PageBuddy(page)) {
2637     page++;
2638     continue;
2639     @@ -6445,8 +6445,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
2640     }
2641    
2642     if (pages && s)
2643     - pr_info("Freeing %s memory: %ldK (%p - %p)\n",
2644     - s, pages << (PAGE_SHIFT - 10), start, end);
2645     + pr_info("Freeing %s memory: %ldK\n",
2646     + s, pages << (PAGE_SHIFT - 10));
2647    
2648     return pages;
2649     }
2650     diff --git a/mm/rmap.c b/mm/rmap.c
2651     index cd37c1c7e21b..94488b0362f8 100644
2652     --- a/mm/rmap.c
2653     +++ b/mm/rmap.c
2654     @@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
2655     cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
2656     tlb_ubc->flush_required = true;
2657    
2658     + /*
2659     + * Ensure compiler does not re-order the setting of tlb_flush_batched
2660     + * before the PTE is cleared.
2661     + */
2662     + barrier();
2663     + mm->tlb_flush_batched = true;
2664     +
2665     /*
2666     * If the PTE was dirty then it's best to assume it's writable. The
2667     * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
2668     @@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
2669    
2670     return should_defer;
2671     }
2672     +
2673     +/*
2674     + * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
2675     + * releasing the PTL if TLB flushes are batched. It's possible for a parallel
2676     + * operation such as mprotect or munmap to race between reclaim unmapping
2677     + * the page and flushing the page. If this race occurs, it potentially allows
2678     + * access to data via a stale TLB entry. Tracking all mm's that have TLB
2679     + * batching in flight would be expensive during reclaim so instead track
2680     + * whether TLB batching occurred in the past and if so then do a flush here
2681     + * if required. This will cost one additional flush per reclaim cycle paid
2682     + * by the first operation at risk such as mprotect and munmap.
2683     + *
2684     + * This must be called under the PTL so that an access to tlb_flush_batched
2685     + * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
2686     + * via the PTL.
2687     + */
2688     +void flush_tlb_batched_pending(struct mm_struct *mm)
2689     +{
2690     + if (mm->tlb_flush_batched) {
2691     + flush_tlb_mm(mm);
2692     +
2693     + /*
2694     + * Do not allow the compiler to re-order the clearing of
2695     + * tlb_flush_batched before the tlb is flushed.
2696     + */
2697     + barrier();
2698     + mm->tlb_flush_batched = false;
2699     + }
2700     +}
2701     #else
2702     static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
2703     struct page *page, bool writable)
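
The three mm/ hunks above (mprotect, mremap, rmap) close a race in which reclaim batches TLB flushes and drops the page-table lock before flushing, letting a parallel mprotect or munmap run against a stale TLB entry. A minimal userspace sketch of the flag-plus-barrier pattern; the struct, barrier() and flush_tlb_mm() below are illustrative stand-ins, not the kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Compiler barrier: stops the compiler reordering stores across it. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct mm { bool tlb_flush_batched; };

    static void flush_tlb_mm(struct mm *mm)
    {
            printf("TLB flushed for mm %p\n", (void *)mm);
    }

    /* Reclaim side: after clearing a PTE, record the pending flush. */
    static void set_pending(struct mm *mm)
    {
            /* ... PTE cleared here, under the page-table lock ... */
            barrier();      /* flag must not be set before the clear */
            mm->tlb_flush_batched = true;
    }

    /* mprotect/munmap side: flush first, only then clear the flag. */
    static void flush_tlb_batched_pending(struct mm *mm)
    {
            if (mm->tlb_flush_batched) {
                    flush_tlb_mm(mm);
                    barrier();      /* flag must not clear before the flush */
                    mm->tlb_flush_batched = false;
            }
    }

    int main(void)
    {
            struct mm mm = { false };

            set_pending(&mm);
            flush_tlb_batched_pending(&mm);  /* flushes, then clears */
            return 0;
    }

A compiler barrier suffices here because, as the comment above notes, both sides run under the same page-table lock, which already provides the CPU-level ordering.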
2704     diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
2705     index b94b1d293506..151e047ce072 100644
2706     --- a/net/core/dev_ioctl.c
2707     +++ b/net/core/dev_ioctl.c
2708     @@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
2709    
2710     if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2711     return -EFAULT;
2712     + ifr.ifr_name[IFNAMSIZ-1] = 0;
2713    
2714     error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
2715     if (error)
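
The one-line dev_ioctl fix forces a NUL terminator on the interface name copied from userspace: copy_from_user() moves raw bytes, and a hostile ifreq need not contain a terminator at all. A small userspace sketch of the failure mode and the fix, assuming only the kernel's 16-byte IFNAMSIZ:

    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16   /* kernel's interface-name buffer size */

    int main(void)
    {
            char ifr_name[IFNAMSIZ];

            memset(ifr_name, 'A', sizeof(ifr_name)); /* hostile: no NUL */
            ifr_name[IFNAMSIZ - 1] = 0;              /* the one-line fix */
            printf("len=%zu (bounded)\n", strlen(ifr_name));
            return 0;
    }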
2716     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2717     index 9c6fd7f83a4a..4d2629781e8b 100644
2718     --- a/net/core/rtnetlink.c
2719     +++ b/net/core/rtnetlink.c
2720     @@ -1965,7 +1965,8 @@ static int do_setlink(const struct sk_buff *skb,
2721     struct sockaddr *sa;
2722     int len;
2723    
2724     - len = sizeof(sa_family_t) + dev->addr_len;
2725     + len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2726     + sizeof(*sa));
2727     sa = kmalloc(len, GFP_KERNEL);
2728     if (!sa) {
2729     err = -ENOMEM;
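
The rtnetlink hunk sizes the allocation at the larger of the device address length and sizeof(struct sockaddr), so a device with a short (or zero-length) hardware address can no longer yield a buffer smaller than the sockaddr that later code writes through. A sketch of the sizing rule, with a plain ternary standing in for the kernel's max_t():

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            size_t addr_len = 0;  /* e.g. a device with no hardware address */
            size_t len = sizeof(sa_family_t)
                         + (addr_len > sizeof(struct sockaddr)
                            ? addr_len : sizeof(struct sockaddr));

            printf("allocate %zu bytes: never smaller than a sockaddr\n", len);
            return 0;
    }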
2730     diff --git a/net/dccp/feat.c b/net/dccp/feat.c
2731     index 1704948e6a12..f227f002c73d 100644
2732     --- a/net/dccp/feat.c
2733     +++ b/net/dccp/feat.c
2734     @@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
2735     * singleton values (which always leads to failure).
2736     * These settings can still (later) be overridden via sockopts.
2737     */
2738     - if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
2739     - ccid_get_builtin_ccids(&rx.val, &rx.len))
2740     + if (ccid_get_builtin_ccids(&tx.val, &tx.len))
2741     return -ENOBUFS;
2742     + if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
2743     + kfree(tx.val);
2744     + return -ENOBUFS;
2745     + }
2746    
2747     if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
2748     !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
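
The dccp_feat_init change splits one combined error check so that a failure of the second allocation frees the already-allocated tx buffer instead of leaking it. The shape of the fix, with alloc_val() as a hypothetical stand-in for ccid_get_builtin_ccids():

    #include <stdlib.h>

    /* Hypothetical stand-in for ccid_get_builtin_ccids(). */
    static int alloc_val(char **val)
    {
            *val = malloc(4);
            return *val ? 0 : -1;
    }

    static int init_pair(void)
    {
            char *tx, *rx;

            if (alloc_val(&tx))
                    return -1;
            if (alloc_val(&rx)) {
                    free(tx);   /* without this, tx leaked on this path */
                    return -1;
            }
            free(tx);
            free(rx);
            return 0;
    }

    int main(void) { return init_pair(); }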
2749     diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
2750     index 86b0933ecd45..8fc160098e11 100644
2751     --- a/net/dccp/ipv4.c
2752     +++ b/net/dccp/ipv4.c
2753     @@ -637,6 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
2754     goto drop_and_free;
2755    
2756     inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
2757     + reqsk_put(req);
2758     return 0;
2759    
2760     drop_and_free:
2761     diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
2762     index 2ac9d2a1aaab..28e8252cc5ea 100644
2763     --- a/net/dccp/ipv6.c
2764     +++ b/net/dccp/ipv6.c
2765     @@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
2766     goto drop_and_free;
2767    
2768     inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
2769     + reqsk_put(req);
2770     return 0;
2771    
2772     drop_and_free:
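
Both DCCP conn_request hunks add a reqsk_put() after hashing the request socket: the hash table takes its own reference, so the creator's initial reference must be dropped or the request socket can never be freed. A toy refcount model of the rule, with generic names rather than the kernel API:

    #include <stdio.h>

    struct req { int refcnt; };

    static void req_get(struct req *r) { r->refcnt++; }

    static void req_put(struct req *r)
    {
            if (--r->refcnt == 0)
                    printf("request freed\n");
    }

    int main(void)
    {
            struct req r = { .refcnt = 1 };  /* creator's reference */

            req_get(&r);  /* the hash table takes its own reference */
            req_put(&r);  /* the added reqsk_put(): drop the creator's */
            req_put(&r);  /* hash-table removal drops the last one: freed */
            return 0;
    }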
2773     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2774     index 3d92534c4450..968d8e165e3d 100644
2775     --- a/net/ipv4/fib_frontend.c
2776     +++ b/net/ipv4/fib_frontend.c
2777     @@ -1319,13 +1319,14 @@ static struct pernet_operations fib_net_ops = {
2778    
2779     void __init ip_fib_init(void)
2780     {
2781     - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2782     - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2783     - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2784     + fib_trie_init();
2785    
2786     register_pernet_subsys(&fib_net_ops);
2787     +
2788     register_netdevice_notifier(&fib_netdev_notifier);
2789     register_inetaddr_notifier(&fib_inetaddr_notifier);
2790    
2791     - fib_trie_init();
2792     + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2793     + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2794     + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2795     }
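
The ip_fib_init reordering runs fib_trie_init() before any rtnl handler or notifier that could use the FIB is registered, closing the window in which an early request touched uninitialized state. A sketch of the ordering rule, with init_table() and register_handler() as hypothetical stand-ins:

    #include <stdio.h>

    static int table_ready;

    static void init_table(void) { table_ready = 1; }

    /* A registered handler may run as soon as registration completes. */
    static void handler(void)
    {
            printf(table_ready ? "handled safely\n"
                               : "BUG: ran before init\n");
    }

    static void register_handler(void (*h)(void)) { h(); }

    int main(void)
    {
            init_table();              /* first, as in the reordered hunk */
            register_handler(handler); /* only now can requests arrive */
            return 0;
    }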
2796     diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2797     index e5c1dbef3626..06215ba88b93 100644
2798     --- a/net/ipv4/ip_output.c
2799     +++ b/net/ipv4/ip_output.c
2800     @@ -936,7 +936,8 @@ static int __ip_append_data(struct sock *sk,
2801     csummode = CHECKSUM_PARTIAL;
2802    
2803     cork->length += length;
2804     - if (((length > mtu) || (skb && skb_is_gso(skb))) &&
2805     + if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
2806     + (skb && skb_is_gso(skb))) &&
2807     (sk->sk_protocol == IPPROTO_UDP) &&
2808     (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2809     (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
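
The __ip_append_data change tests the MTU against the new data plus what is already queued on the tail skb, not the new chunk alone; otherwise repeated small appends on a UFO-capable path could exceed the MTU without ever entering the fragmentation branch. Illustrative arithmetic only, with made-up values:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int mtu = 1500;     /* path MTU */
            unsigned int queued = 1400;  /* bytes already on the tail skb */
            unsigned int length = 200;   /* new chunk being appended */

            bool old_test = length > mtu;           /* false: bug missed */
            bool new_test = length + queued > mtu;  /* true: must fragment */

            printf("old=%d new=%d\n", old_test, new_test);
            return 0;
    }

The ip6_output.c hunk further below applies the same correction to the IPv6 append path.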
2810     diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
2811     index fd8220213afc..146d86105183 100644
2812     --- a/net/ipv4/netfilter/nf_reject_ipv4.c
2813     +++ b/net/ipv4/netfilter/nf_reject_ipv4.c
2814     @@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
2815     /* ip_route_me_harder expects skb->dst to be set */
2816     skb_dst_set_noref(nskb, skb_dst(oldskb));
2817    
2818     + nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
2819     +
2820     skb_reserve(nskb, LL_MAX_HEADER);
2821     niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
2822     ip4_dst_hoplimit(skb_dst(nskb)));
2823     diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
2824     index e3c4043c27de..b6f710d515d0 100644
2825     --- a/net/ipv4/syncookies.c
2826     +++ b/net/ipv4/syncookies.c
2827     @@ -334,6 +334,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
2828     treq = tcp_rsk(req);
2829     treq->rcv_isn = ntohl(th->seq) - 1;
2830     treq->snt_isn = cookie;
2831     + treq->txhash = net_tx_rndhash();
2832     req->mss = mss;
2833     ireq->ir_num = ntohs(th->dest);
2834     ireq->ir_rmt_port = th->source;
2835     diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
2836     index 80bc36b25de2..566cfc50f7cf 100644
2837     --- a/net/ipv4/sysctl_net_ipv4.c
2838     +++ b/net/ipv4/sysctl_net_ipv4.c
2839     @@ -958,7 +958,7 @@ static struct ctl_table ipv4_net_table[] = {
2840     .data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
2841     .maxlen = sizeof(unsigned int),
2842     .mode = 0644,
2843     - .proc_handler = proc_dointvec,
2844     + .proc_handler = proc_douintvec,
2845     },
2846     #ifdef CONFIG_IP_ROUTE_MULTIPATH
2847     {
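
tcp_notsent_lowat is an unsigned value, so its table entry switches from the signed proc_dointvec handler to proc_douintvec; with the signed handler, values above INT_MAX were mangled. A userspace sketch of the signedness problem, with strtol/strtoul standing in for the proc handlers:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *input = "3000000000"; /* valid u32, too big for s32 */

            int as_int = (int)strtol(input, NULL, 10);
            unsigned int as_uint = (unsigned int)strtoul(input, NULL, 10);

            printf("signed handler keeps: %d\n", as_int);  /* garbage */
            printf("unsigned handler keeps: %u\n", as_uint);
            return 0;
    }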
2848     diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
2849     index 0ea66c2c9344..cb8db347c680 100644
2850     --- a/net/ipv4/tcp_bbr.c
2851     +++ b/net/ipv4/tcp_bbr.c
2852     @@ -83,7 +83,8 @@ struct bbr {
2853     cwnd_gain:10, /* current gain for setting cwnd */
2854     full_bw_cnt:3, /* number of rounds without large bw gains */
2855     cycle_idx:3, /* current index in pacing_gain cycle array */
2856     - unused_b:6;
2857     + has_seen_rtt:1, /* have we seen an RTT sample yet? */
2858     + unused_b:5;
2859     u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
2860     u32 full_bw; /* recent bw, to estimate if pipe is full */
2861     };
2862     @@ -182,6 +183,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
2863     return rate >> BW_SCALE;
2864     }
2865    
2866     +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
2867     +static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
2868     +{
2869     + u64 rate = bw;
2870     +
2871     + rate = bbr_rate_bytes_per_sec(sk, rate, gain);
2872     + rate = min_t(u64, rate, sk->sk_max_pacing_rate);
2873     + return rate;
2874     +}
2875     +
2876     +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
2877     +static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
2878     +{
2879     + struct tcp_sock *tp = tcp_sk(sk);
2880     + struct bbr *bbr = inet_csk_ca(sk);
2881     + u64 bw;
2882     + u32 rtt_us;
2883     +
2884     + if (tp->srtt_us) { /* any RTT sample yet? */
2885     + rtt_us = max(tp->srtt_us >> 3, 1U);
2886     + bbr->has_seen_rtt = 1;
2887     + } else { /* no RTT sample yet */
2888     + rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
2889     + }
2890     + bw = (u64)tp->snd_cwnd * BW_UNIT;
2891     + do_div(bw, rtt_us);
2892     + sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
2893     +}
2894     +
2895     /* Pace using current bw estimate and a gain factor. In order to help drive the
2896     * network toward lower queues while maintaining high utilization and low
2897     * latency, the average pacing rate aims to be slightly (~1%) lower than the
2898     @@ -191,12 +221,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
2899     */
2900     static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
2901     {
2902     + struct tcp_sock *tp = tcp_sk(sk);
2903     struct bbr *bbr = inet_csk_ca(sk);
2904     - u64 rate = bw;
2905     + u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
2906    
2907     - rate = bbr_rate_bytes_per_sec(sk, rate, gain);
2908     - rate = min_t(u64, rate, sk->sk_max_pacing_rate);
2909     - if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
2910     + if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
2911     + bbr_init_pacing_rate_from_rtt(sk);
2912     + if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
2913     sk->sk_pacing_rate = rate;
2914     }
2915    
2916     @@ -769,7 +800,6 @@ static void bbr_init(struct sock *sk)
2917     {
2918     struct tcp_sock *tp = tcp_sk(sk);
2919     struct bbr *bbr = inet_csk_ca(sk);
2920     - u64 bw;
2921    
2922     bbr->prior_cwnd = 0;
2923     bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
2924     @@ -785,11 +815,8 @@ static void bbr_init(struct sock *sk)
2925    
2926     minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
2927    
2928     - /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
2929     - bw = (u64)tp->snd_cwnd * BW_UNIT;
2930     - do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
2931     - sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
2932     - bbr_set_pacing_rate(sk, bw, bbr_high_gain);
2933     + bbr->has_seen_rtt = 0;
2934     + bbr_init_pacing_rate_from_rtt(sk);
2935    
2936     bbr->restore_cwnd = 0;
2937     bbr->round_start = 0;
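
The BBR hunks seed the pacing rate as high_gain * init_cwnd / RTT, substituting a nominal 1 ms RTT until the first real sample arrives; the new has_seen_rtt bit then lets bbr_set_pacing_rate() re-derive the rate once srtt_us becomes available. A back-of-envelope version of the seed calculation, in floating point rather than the kernel's fixed-point BW_UNIT arithmetic, with illustrative constants:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long cwnd = 10;     /* initial congestion window */
            unsigned long long mss = 1448;    /* bytes per segment */
            unsigned long long rtt_us = 1000; /* nominal 1 ms default RTT */
            double high_gain = 2.885;         /* approx. BBR startup gain */

            double rate = high_gain * (double)(cwnd * mss)
                          / ((double)rtt_us / 1e6);
            printf("seeded pacing rate: %.0f bytes/sec\n", rate);
            return 0;
    }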
2938     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2939     index 5a4b8e7bcedd..a5cdf2a23609 100644
2940     --- a/net/ipv6/ip6_output.c
2941     +++ b/net/ipv6/ip6_output.c
2942     @@ -662,8 +662,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2943     *prevhdr = NEXTHDR_FRAGMENT;
2944     tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
2945     if (!tmp_hdr) {
2946     - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2947     - IPSTATS_MIB_FRAGFAILS);
2948     err = -ENOMEM;
2949     goto fail;
2950     }
2951     @@ -782,8 +780,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
2952     frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
2953     hroom + troom, GFP_ATOMIC);
2954     if (!frag) {
2955     - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2956     - IPSTATS_MIB_FRAGFAILS);
2957     err = -ENOMEM;
2958     goto fail;
2959     }
2960     @@ -1376,7 +1372,7 @@ static int __ip6_append_data(struct sock *sk,
2961     */
2962    
2963     cork->length += length;
2964     - if ((((length + fragheaderlen) > mtu) ||
2965     + if ((((length + (skb ? skb->len : headersize)) > mtu) ||
2966     (skb && skb_is_gso(skb))) &&
2967     (sk->sk_protocol == IPPROTO_UDP) &&
2968     (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2969     diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
2970     index 10090400c72f..eedee5d108d9 100644
2971     --- a/net/ipv6/netfilter/nf_reject_ipv6.c
2972     +++ b/net/ipv6/netfilter/nf_reject_ipv6.c
2973     @@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
2974     fl6.fl6_sport = otcph->dest;
2975     fl6.fl6_dport = otcph->source;
2976     fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
2977     + fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
2978     security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
2979     dst = ip6_route_output(net, NULL, &fl6);
2980     if (dst->error) {
2981     @@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
2982    
2983     skb_dst_set(nskb, dst);
2984    
2985     + nskb->mark = fl6.flowi6_mark;
2986     +
2987     skb_reserve(nskb, hh_len + dst->header_len);
2988     ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
2989     ip6_dst_hoplimit(dst));
2990     diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
2991     index e9065b8d3af8..abb2c307fbe8 100644
2992     --- a/net/ipv6/output_core.c
2993     +++ b/net/ipv6/output_core.c
2994     @@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
2995    
2996     int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
2997     {
2998     - u16 offset = sizeof(struct ipv6hdr);
2999     + unsigned int offset = sizeof(struct ipv6hdr);
3000     unsigned int packet_len = skb_tail_pointer(skb) -
3001     skb_network_header(skb);
3002     int found_rhdr = 0;
3003     @@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3004    
3005     while (offset <= packet_len) {
3006     struct ipv6_opt_hdr *exthdr;
3007     + unsigned int len;
3008    
3009     switch (**nexthdr) {
3010    
3011     @@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
3012    
3013     exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
3014     offset);
3015     - offset += ipv6_optlen(exthdr);
3016     + len = ipv6_optlen(exthdr);
3017     + if (len + offset >= IPV6_MAXPLEN)
3018     + return -EINVAL;
3019     + offset += len;
3020     *nexthdr = &exthdr->nexthdr;
3021     }
3022    
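
The ip6_find_1stfragopt fix widens the offset accumulator from u16 to unsigned int and bounds it at IPV6_MAXPLEN, since crafted extension-header lengths could wrap a 16-bit offset back inside the packet and loop forever. A sketch of the wraparound:

    #include <stdint.h>
    #include <stdio.h>

    #define IPV6_MAXPLEN 65535

    int main(void)
    {
            uint16_t narrow = 65000;    /* old u16 accumulator */
            unsigned int wide = 65000;  /* widened accumulator */
            unsigned int len = 2000;    /* crafted extension-header length */

            narrow += len;  /* wraps: lands back inside the packet */
            printf("u16 offset wraps to %u\n", (unsigned int)narrow);

            if (wide + len >= IPV6_MAXPLEN)  /* the added bound check */
                    printf("rejected: %u exceeds max payload\n", wide + len);
            return 0;
    }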
3023     diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
3024     index 59c483937aec..7a86433d8896 100644
3025     --- a/net/ipv6/syncookies.c
3026     +++ b/net/ipv6/syncookies.c
3027     @@ -209,6 +209,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
3028     treq->snt_synack.v64 = 0;
3029     treq->rcv_isn = ntohl(th->seq) - 1;
3030     treq->snt_isn = cookie;
3031     + treq->txhash = net_tx_rndhash();
3032    
3033     /*
3034     * We need to lookup the dst_entry to get the correct window size.
3035     diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
3036     index 48386bff8b4e..b28e45b691de 100644
3037     --- a/net/openvswitch/conntrack.c
3038     +++ b/net/openvswitch/conntrack.c
3039     @@ -1088,8 +1088,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3040    
3041     nla_for_each_nested(a, attr, rem) {
3042     int type = nla_type(a);
3043     - int maxlen = ovs_ct_attr_lens[type].maxlen;
3044     - int minlen = ovs_ct_attr_lens[type].minlen;
3045     + int maxlen;
3046     + int minlen;
3047    
3048     if (type > OVS_CT_ATTR_MAX) {
3049     OVS_NLERR(log,
3050     @@ -1097,6 +1097,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
3051     type, OVS_CT_ATTR_MAX);
3052     return -EINVAL;
3053     }
3054     +
3055     + maxlen = ovs_ct_attr_lens[type].maxlen;
3056     + minlen = ovs_ct_attr_lens[type].minlen;
3057     if (nla_len(a) < minlen || nla_len(a) > maxlen) {
3058     OVS_NLERR(log,
3059     "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
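
The openvswitch hunk moves the ovs_ct_attr_lens[] lookup after the type range check; indexing the table first meant an attribute type above OVS_CT_ATTR_MAX caused an out-of-bounds read before validation ever ran. The check-then-index shape, with a stand-in lens[] table:

    #include <stdio.h>

    #define ATTR_MAX 3

    static const int lens[ATTR_MAX + 1] = { 0, 4, 8, 16 };

    static int attr_len(int type)
    {
            if (type < 0 || type > ATTR_MAX)
                    return -1;        /* check first... */
            return lens[type];        /* ...then index the table */
    }

    int main(void)
    {
            printf("type 2 -> len %d\n", attr_len(2));
            printf("type 9 -> %d (rejected, no OOB read)\n", attr_len(9));
            return 0;
    }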
3060     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3061     index 6a563e6e24de..365c83fcee02 100644
3062     --- a/net/packet/af_packet.c
3063     +++ b/net/packet/af_packet.c
3064     @@ -4322,7 +4322,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3065     register_prot_hook(sk);
3066     }
3067     spin_unlock(&po->bind_lock);
3068     - if (closing && (po->tp_version > TPACKET_V2)) {
3069     + if (pg_vec && (po->tp_version > TPACKET_V2)) {
3070     /* Because we don't support block-based V3 on tx-ring */
3071     if (!tx_ring)
3072     prb_shutdown_retire_blk_timer(po, rb_queue);
3073     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3074     index bb1aad39d987..6f337f00ba58 100644
3075     --- a/sound/pci/hda/patch_realtek.c
3076     +++ b/sound/pci/hda/patch_realtek.c
3077     @@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3078     SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
3079     SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
3080     SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
3081     + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
3082     SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
3083     SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
3084    
3085     diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
3086     index 10c2a564a715..1ac96ef9ee20 100644
3087     --- a/sound/soc/codecs/rt5645.c
3088     +++ b/sound/soc/codecs/rt5645.c
3089     @@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3090     }
3091     }
3092    
3093     + regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
3094     + RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
3095     +
3096     if (rt5645->pdata.jd_invert) {
3097     regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
3098     RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
3099     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3100     index 21c3ef01c438..80088c98ce27 100644
3101     --- a/sound/soc/soc-pcm.c
3102     +++ b/sound/soc/soc-pcm.c
3103     @@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
3104     dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
3105     be->dai_link->name, event, dir);
3106    
3107     + if ((event == SND_SOC_DAPM_STREAM_STOP) &&
3108     + (be->dpcm[dir].users >= 1))
3109     + continue;
3110     +
3111     snd_soc_dapm_stream_event(be, dir, event);
3112     }
3113
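
The soc-pcm hunk skips delivering a STREAM_STOP event to a back-end DAI that still has active users, so one front-end shutting down cannot power down a back-end shared with another. A toy model of the use-count guard, with generic names:

    #include <stdio.h>

    struct backend { int users; };

    static void stream_stop(struct backend *be)
    {
            if (be->users >= 1) {  /* the added guard */
                    printf("BE busy, stop skipped\n");
                    return;
            }
            printf("BE stopped\n");
    }

    int main(void)
    {
            struct backend be = { .users = 1 };

            stream_stop(&be);  /* another front-end still active: skipped */
            be.users = 0;
            stream_stop(&be);  /* last user gone: stop proceeds */
            return 0;
    }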