Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0145-4.19.46-all-fixes.patch



Revision 3424
Fri Aug 2 11:47:53 2019 UTC by niro
File size: 141586 bytes
-linux-4.19.46
1 niro 3424 diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
2     index 7b7b845c490a..32b5186be412 100644
3     --- a/Documentation/filesystems/porting
4     +++ b/Documentation/filesystems/porting
5     @@ -622,3 +622,8 @@ in your dentry operations instead.
6     alloc_file_clone(file, flags, ops) does not affect any caller's references.
7     On success you get a new struct file sharing the mount/dentry with the
8     original, on failure - ERR_PTR().
9     +--
10     +[mandatory]
11     + DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
12     + default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
13     + business doing so.
14     diff --git a/Makefile b/Makefile
15     index b21dd3866b63..5383dd317d59 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,7 +1,7 @@
19     # SPDX-License-Identifier: GPL-2.0
20     VERSION = 4
21     PATCHLEVEL = 19
22     -SUBLEVEL = 45
23     +SUBLEVEL = 46
24     EXTRAVERSION =
25     NAME = "People's Front"
26    
27     diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
28     index 413863508f6f..d67fb64e908c 100644
29     --- a/arch/mips/kernel/perf_event_mipsxx.c
30     +++ b/arch/mips/kernel/perf_event_mipsxx.c
31     @@ -64,17 +64,11 @@ struct mips_perf_event {
32     #define CNTR_EVEN 0x55555555
33     #define CNTR_ODD 0xaaaaaaaa
34     #define CNTR_ALL 0xffffffff
35     -#ifdef CONFIG_MIPS_MT_SMP
36     enum {
37     T = 0,
38     V = 1,
39     P = 2,
40     } range;
41     -#else
42     - #define T
43     - #define V
44     - #define P
45     -#endif
46     };
47    
48     static struct mips_perf_event raw_event;
49     @@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
50     {
51     struct perf_event *event = container_of(evt, struct perf_event, hw);
52     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
53     -#ifdef CONFIG_MIPS_MT_SMP
54     unsigned int range = evt->event_base >> 24;
55     -#endif /* CONFIG_MIPS_MT_SMP */
56    
57     WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
58    
59     @@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
60     /* Make sure interrupt enabled. */
61     MIPS_PERFCTRL_IE;
62    
63     -#ifdef CONFIG_CPU_BMIPS5000
64     - {
65     + if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
66     /* enable the counter for the calling thread */
67     cpuc->saved_ctrl[idx] |=
68     (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
69     - }
70     -#else
71     -#ifdef CONFIG_MIPS_MT_SMP
72     - if (range > V) {
73     + } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
74     /* The counter is processor wide. Set it up to count all TCs. */
75     pr_debug("Enabling perf counter for all TCs\n");
76     cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
77     - } else
78     -#endif /* CONFIG_MIPS_MT_SMP */
79     - {
80     + } else {
81     unsigned int cpu, ctrl;
82    
83     /*
84     @@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
85     cpuc->saved_ctrl[idx] |= ctrl;
86     pr_debug("Enabling perf counter for CPU%d\n", cpu);
87     }
88     -#endif /* CONFIG_CPU_BMIPS5000 */
89     /*
90     * We do not actually let the counter run. Leave it until start().
91     */
92     diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
93     index 5aba20fa48aa..e8b798fd0cf0 100644
94     --- a/arch/parisc/boot/compressed/head.S
95     +++ b/arch/parisc/boot/compressed/head.S
96     @@ -22,7 +22,7 @@
97     __HEAD
98    
99     ENTRY(startup)
100     - .level LEVEL
101     + .level PA_ASM_LEVEL
102    
103     #define PSW_W_SM 0x200
104     #define PSW_W_BIT 36
105     @@ -63,7 +63,7 @@ $bss_loop:
106     load32 BOOTADDR(decompress_kernel),%r3
107    
108     #ifdef CONFIG_64BIT
109     - .level LEVEL
110     + .level PA_ASM_LEVEL
111     ssm PSW_W_SM, %r0 /* set W-bit */
112     depdi 0, 31, 32, %r3
113     #endif
114     @@ -72,7 +72,7 @@ $bss_loop:
115    
116     startup_continue:
117     #ifdef CONFIG_64BIT
118     - .level LEVEL
119     + .level PA_ASM_LEVEL
120     rsm PSW_W_SM, %r0 /* clear W-bit */
121     #endif
122    
123     diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
124     index e9c6385ef0d1..6f30fa5bdaed 100644
125     --- a/arch/parisc/include/asm/assembly.h
126     +++ b/arch/parisc/include/asm/assembly.h
127     @@ -61,14 +61,14 @@
128     #define LDCW ldcw,co
129     #define BL b,l
130     # ifdef CONFIG_64BIT
131     -# define LEVEL 2.0w
132     +# define PA_ASM_LEVEL 2.0w
133     # else
134     -# define LEVEL 2.0
135     +# define PA_ASM_LEVEL 2.0
136     # endif
137     #else
138     #define LDCW ldcw
139     #define BL bl
140     -#define LEVEL 1.1
141     +#define PA_ASM_LEVEL 1.1
142     #endif
143    
144     #ifdef __ASSEMBLY__
145     diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
146     index fbb4e43fda05..f56cbab64ac1 100644
147     --- a/arch/parisc/kernel/head.S
148     +++ b/arch/parisc/kernel/head.S
149     @@ -22,7 +22,7 @@
150     #include <linux/linkage.h>
151     #include <linux/init.h>
152    
153     - .level LEVEL
154     + .level PA_ASM_LEVEL
155    
156     __INITDATA
157     ENTRY(boot_args)
158     @@ -258,7 +258,7 @@ stext_pdc_ret:
159     ldo R%PA(fault_vector_11)(%r10),%r10
160    
161     $is_pa20:
162     - .level LEVEL /* restore 1.1 || 2.0w */
163     + .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
164     #endif /*!CONFIG_64BIT*/
165     load32 PA(fault_vector_20),%r10
166    
167     diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
168     index 841db71958cd..97c206734e24 100644
169     --- a/arch/parisc/kernel/process.c
170     +++ b/arch/parisc/kernel/process.c
171     @@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
172     */
173    
174     int running_on_qemu __read_mostly;
175     +EXPORT_SYMBOL(running_on_qemu);
176    
177     void __cpuidle arch_cpu_idle_dead(void)
178     {
179     diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
180     index f453997a7b8f..61a647a55c69 100644
181     --- a/arch/parisc/kernel/syscall.S
182     +++ b/arch/parisc/kernel/syscall.S
183     @@ -48,7 +48,7 @@ registers).
184     */
185     #define KILL_INSN break 0,0
186    
187     - .level LEVEL
188     + .level PA_ASM_LEVEL
189    
190     .text
191    
192     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
193     index 585bbc5b3216..c90e00db5c13 100644
194     --- a/arch/x86/entry/entry_64.S
195     +++ b/arch/x86/entry/entry_64.S
196     @@ -905,7 +905,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
197     */
198     #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
199    
200     -.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
201     +.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
202     ENTRY(\sym)
203     UNWIND_HINT_IRET_REGS offset=\has_error_code*8
204    
205     @@ -925,6 +925,20 @@ ENTRY(\sym)
206     jnz .Lfrom_usermode_switch_stack_\@
207     .endif
208    
209     + .if \create_gap == 1
210     + /*
211     + * If coming from kernel space, create a 6-word gap to allow the
212     + * int3 handler to emulate a call instruction.
213     + */
214     + testb $3, CS-ORIG_RAX(%rsp)
215     + jnz .Lfrom_usermode_no_gap_\@
216     + .rept 6
217     + pushq 5*8(%rsp)
218     + .endr
219     + UNWIND_HINT_IRET_REGS offset=8
220     +.Lfrom_usermode_no_gap_\@:
221     + .endif
222     +
223     .if \paranoid
224     call paranoid_entry
225     .else
226     @@ -1154,7 +1168,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
227     #endif /* CONFIG_HYPERV */
228    
229     idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
230     -idtentry int3 do_int3 has_error_code=0
231     +idtentry int3 do_int3 has_error_code=0 create_gap=1
232     idtentry stack_segment do_stack_segment has_error_code=1
233    
234     #ifdef CONFIG_XEN
235     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
236     index a759e59990fb..09c53bcbd497 100644
237     --- a/arch/x86/events/intel/core.c
238     +++ b/arch/x86/events/intel/core.c
239     @@ -2074,15 +2074,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
240     cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
241     cpuc->intel_cp_status &= ~(1ull << hwc->idx);
242    
243     - if (unlikely(event->attr.precise_ip))
244     - intel_pmu_pebs_disable(event);
245     -
246     if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
247     intel_pmu_disable_fixed(hwc);
248     return;
249     }
250    
251     x86_pmu_disable_event(event);
252     +
253     + /*
254     + * Needs to be called after x86_pmu_disable_event,
255     + * so we don't trigger the event without PEBS bit set.
256     + */
257     + if (unlikely(event->attr.precise_ip))
258     + intel_pmu_pebs_disable(event);
259     }
260    
261     static void intel_pmu_del_event(struct perf_event *event)
262     diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
263     index e85ff65c43c3..05861cc08787 100644
264     --- a/arch/x86/include/asm/text-patching.h
265     +++ b/arch/x86/include/asm/text-patching.h
266     @@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs);
267     extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
268     extern int after_bootmem;
269    
270     +static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
271     +{
272     + regs->ip = ip;
273     +}
274     +
275     +#define INT3_INSN_SIZE 1
276     +#define CALL_INSN_SIZE 5
277     +
278     +#ifdef CONFIG_X86_64
279     +static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
280     +{
281     + /*
282     + * The int3 handler in entry_64.S adds a gap between the
283     + * stack where the break point happened, and the saving of
284     + * pt_regs. We can extend the original stack because of
285     + * this gap. See the idtentry macro's create_gap option.
286     + */
287     + regs->sp -= sizeof(unsigned long);
288     + *(unsigned long *)regs->sp = val;
289     +}
290     +
291     +static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
292     +{
293     + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
294     + int3_emulate_jmp(regs, func);
295     +}
296     +#endif
297     +
298     #endif /* _ASM_X86_TEXT_PATCHING_H */
299     diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
300     index 7ee8067cbf45..4d2a401c178b 100644
301     --- a/arch/x86/kernel/ftrace.c
302     +++ b/arch/x86/kernel/ftrace.c
303     @@ -29,6 +29,7 @@
304     #include <asm/kprobes.h>
305     #include <asm/ftrace.h>
306     #include <asm/nops.h>
307     +#include <asm/text-patching.h>
308    
309     #ifdef CONFIG_DYNAMIC_FTRACE
310    
311     @@ -228,6 +229,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
312     }
313    
314     static unsigned long ftrace_update_func;
315     +static unsigned long ftrace_update_func_call;
316    
317     static int update_ftrace_func(unsigned long ip, void *new)
318     {
319     @@ -256,6 +258,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
320     unsigned char *new;
321     int ret;
322    
323     + ftrace_update_func_call = (unsigned long)func;
324     +
325     new = ftrace_call_replace(ip, (unsigned long)func);
326     ret = update_ftrace_func(ip, new);
327    
328     @@ -291,13 +295,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
329     if (WARN_ON_ONCE(!regs))
330     return 0;
331    
332     - ip = regs->ip - 1;
333     - if (!ftrace_location(ip) && !is_ftrace_caller(ip))
334     - return 0;
335     + ip = regs->ip - INT3_INSN_SIZE;
336    
337     - regs->ip += MCOUNT_INSN_SIZE - 1;
338     +#ifdef CONFIG_X86_64
339     + if (ftrace_location(ip)) {
340     + int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
341     + return 1;
342     + } else if (is_ftrace_caller(ip)) {
343     + if (!ftrace_update_func_call) {
344     + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
345     + return 1;
346     + }
347     + int3_emulate_call(regs, ftrace_update_func_call);
348     + return 1;
349     + }
350     +#else
351     + if (ftrace_location(ip) || is_ftrace_caller(ip)) {
352     + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
353     + return 1;
354     + }
355     +#endif
356    
357     - return 1;
358     + return 0;
359     }
360    
361     static int ftrace_write(unsigned long ip, const char *val, int size)
362     @@ -868,6 +887,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
363    
364     func = ftrace_ops_get_func(ops);
365    
366     + ftrace_update_func_call = (unsigned long)func;
367     +
368     /* Do a safe modify in case the trampoline is executing */
369     new = ftrace_call_replace(ip, (unsigned long)func);
370     ret = update_ftrace_func(ip, new);
371     @@ -964,6 +985,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
372     {
373     unsigned char *new;
374    
375     + ftrace_update_func_call = 0UL;
376     new = ftrace_jmp_replace(ip, (unsigned long)func);
377    
378     return update_ftrace_func(ip, new);
379     diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
380     index 01d209ab5481..229d99605165 100644
381     --- a/arch/x86/kvm/hyperv.c
382     +++ b/arch/x86/kvm/hyperv.c
383     @@ -1291,7 +1291,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
384     flush.address_space, flush.flags);
385    
386     sparse_banks[0] = flush.processor_mask;
387     - all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
388     +
389     + /*
390     + * Work around possible WS2012 bug: it sends hypercalls
391     + * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
392     + * while also expecting us to flush something and crashing if
393     + * we don't. Let's treat processor_mask == 0 same as
394     + * HV_FLUSH_ALL_PROCESSORS.
395     + */
396     + all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
397     + flush.processor_mask == 0;
398     } else {
399     if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
400     sizeof(flush_ex))))
401     diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
402     index 25a972c61b0a..3c19d60316a8 100644
403     --- a/arch/x86/lib/Makefile
404     +++ b/arch/x86/lib/Makefile
405     @@ -6,6 +6,18 @@
406     # Produces uninteresting flaky coverage.
407     KCOV_INSTRUMENT_delay.o := n
408    
409     +# Early boot use of cmdline; don't instrument it
410     +ifdef CONFIG_AMD_MEM_ENCRYPT
411     +KCOV_INSTRUMENT_cmdline.o := n
412     +KASAN_SANITIZE_cmdline.o := n
413     +
414     +ifdef CONFIG_FUNCTION_TRACER
415     +CFLAGS_REMOVE_cmdline.o = -pg
416     +endif
417     +
418     +CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
419     +endif
420     +
421     inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
422     inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
423     quiet_cmd_inat_tables = GEN $@
424     diff --git a/drivers/base/dd.c b/drivers/base/dd.c
425     index f5b74856784a..d48b310c4760 100644
426     --- a/drivers/base/dd.c
427     +++ b/drivers/base/dd.c
428     @@ -482,7 +482,7 @@ re_probe:
429    
430     ret = dma_configure(dev);
431     if (ret)
432     - goto dma_failed;
433     + goto probe_failed;
434    
435     if (driver_sysfs_add(dev)) {
436     printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
437     @@ -537,14 +537,13 @@ re_probe:
438     goto done;
439    
440     probe_failed:
441     - dma_deconfigure(dev);
442     -dma_failed:
443     if (dev->bus)
444     blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
445     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
446     pinctrl_bind_failed:
447     device_links_no_driver(dev);
448     devres_release_all(dev);
449     + dma_deconfigure(dev);
450     driver_sysfs_remove(dev);
451     dev->driver = NULL;
452     dev_set_drvdata(dev, NULL);
453     diff --git a/drivers/block/brd.c b/drivers/block/brd.c
454     index c18586fccb6f..17defbf4f332 100644
455     --- a/drivers/block/brd.c
456     +++ b/drivers/block/brd.c
457     @@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
458     /*
459     * Must use NOIO because we don't want to recurse back into the
460     * block or filesystem layers from page reclaim.
461     - *
462     - * Cannot support DAX and highmem, because our ->direct_access
463     - * routine for DAX must return memory that is always addressable.
464     - * If DAX was reworked to use pfns and kmap throughout, this
465     - * restriction might be able to be lifted.
466     */
467     - gfp_flags = GFP_NOIO | __GFP_ZERO;
468     + gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
469     page = alloc_page(gfp_flags);
470     if (!page)
471     return NULL;
472     diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
473     index f40419959656..794eeff0d5d2 100644
474     --- a/drivers/clk/hisilicon/clk-hi3660.c
475     +++ b/drivers/clk/hisilicon/clk-hi3660.c
476     @@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
477     "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
478     { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
479     "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
480     + /*
481     + * clk_gate_ufs_subsys is a system bus clock, mark it as critical
482     + * clock and keep it on for system suspend and resume.
483     + */
484     { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
485     - CLK_SET_RATE_PARENT, 0x50, 21, 0, },
486     + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
487     { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
488     CLK_SET_RATE_PARENT, 0x50, 28, 0, },
489     { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
490     diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
491     index f54e4015b0b1..18842d660317 100644
492     --- a/drivers/clk/mediatek/clk-pll.c
493     +++ b/drivers/clk/mediatek/clk-pll.c
494     @@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
495     return ((unsigned long)vco + postdiv - 1) / postdiv;
496     }
497    
498     +static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
499     +{
500     + u32 r;
501     +
502     + if (pll->tuner_en_addr) {
503     + r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
504     + writel(r, pll->tuner_en_addr);
505     + } else if (pll->tuner_addr) {
506     + r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
507     + writel(r, pll->tuner_addr);
508     + }
509     +}
510     +
511     +static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
512     +{
513     + u32 r;
514     +
515     + if (pll->tuner_en_addr) {
516     + r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
517     + writel(r, pll->tuner_en_addr);
518     + } else if (pll->tuner_addr) {
519     + r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
520     + writel(r, pll->tuner_addr);
521     + }
522     +}
523     +
524     static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
525     int postdiv)
526     {
527     @@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
528    
529     pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
530    
531     + /* disable tuner */
532     + __mtk_pll_tuner_disable(pll);
533     +
534     /* set postdiv */
535     val = readl(pll->pd_addr);
536     val &= ~(POSTDIV_MASK << pll->data->pd_shift);
537     @@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
538     if (pll->tuner_addr)
539     writel(con1 + 1, pll->tuner_addr);
540    
541     + /* restore tuner_en */
542     + __mtk_pll_tuner_enable(pll);
543     +
544     if (pll_en)
545     udelay(20);
546     }
547     @@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
548     r |= pll->data->en_mask;
549     writel(r, pll->base_addr + REG_CON0);
550    
551     - if (pll->tuner_en_addr) {
552     - r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
553     - writel(r, pll->tuner_en_addr);
554     - } else if (pll->tuner_addr) {
555     - r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
556     - writel(r, pll->tuner_addr);
557     - }
558     + __mtk_pll_tuner_enable(pll);
559    
560     udelay(20);
561    
562     @@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
563     writel(r, pll->base_addr + REG_CON0);
564     }
565    
566     - if (pll->tuner_en_addr) {
567     - r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
568     - writel(r, pll->tuner_en_addr);
569     - } else if (pll->tuner_addr) {
570     - r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
571     - writel(r, pll->tuner_addr);
572     - }
573     + __mtk_pll_tuner_disable(pll);
574    
575     r = readl(pll->base_addr + REG_CON0);
576     r &= ~CON0_BASE_EN;
577     diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
578     index e431661fe874..ecbae8acd05b 100644
579     --- a/drivers/clk/rockchip/clk-rk3328.c
580     +++ b/drivers/clk/rockchip/clk-rk3328.c
581     @@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
582     RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
583     RK3328_CLKGATE_CON(2), 12, GFLAGS),
584     COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
585     - RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
586     + RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
587     RK3328_CLKGATE_CON(2), 4, GFLAGS),
588     COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
589     RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
590     @@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
591     GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
592     RK3328_CLKGATE_CON(25), 1, GFLAGS),
593     GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
594     - RK3328_CLKGATE_CON(25), 0, GFLAGS),
595     + RK3328_CLKGATE_CON(25), 2, GFLAGS),
596     GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
597     - RK3328_CLKGATE_CON(25), 1, GFLAGS),
598     + RK3328_CLKGATE_CON(25), 3, GFLAGS),
599     GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
600     - RK3328_CLKGATE_CON(25), 0, GFLAGS),
601     + RK3328_CLKGATE_CON(25), 4, GFLAGS),
602     GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
603     - RK3328_CLKGATE_CON(25), 1, GFLAGS),
604     + RK3328_CLKGATE_CON(25), 5, GFLAGS),
605     GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
606     - RK3328_CLKGATE_CON(25), 0, GFLAGS),
607     + RK3328_CLKGATE_CON(25), 6, GFLAGS),
608    
609     COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
610     RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
611     @@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
612    
613     /* PD_GMAC */
614     COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
615     - RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
616     + RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
617     RK3328_CLKGATE_CON(3), 2, GFLAGS),
618     COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
619     RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
620     @@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
621    
622     /* PD_PERI */
623     GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
624     - GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
625     + GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
626    
627     GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
628     GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
629     @@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
630     &rk3328_cpuclk_data, rk3328_cpuclk_rates,
631     ARRAY_SIZE(rk3328_cpuclk_rates));
632    
633     - rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
634     + rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
635     ROCKCHIP_SOFTRST_HIWORD_MASK);
636    
637     rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
638     diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
639     index ebd9436d2c7c..1ad53d1016a3 100644
640     --- a/drivers/clk/sunxi-ng/ccu_nkmp.c
641     +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
642     @@ -160,7 +160,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
643     unsigned long parent_rate)
644     {
645     struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
646     - u32 n_mask, k_mask, m_mask, p_mask;
647     + u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
648     struct _ccu_nkmp _nkmp;
649     unsigned long flags;
650     u32 reg;
651     @@ -179,10 +179,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
652    
653     ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
654    
655     - n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
656     - k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
657     - m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
658     - p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
659     + if (nkmp->n.width)
660     + n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
661     + nkmp->n.shift);
662     + if (nkmp->k.width)
663     + k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
664     + nkmp->k.shift);
665     + if (nkmp->m.width)
666     + m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
667     + nkmp->m.shift);
668     + if (nkmp->p.width)
669     + p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
670     + nkmp->p.shift);
671    
672     spin_lock_irqsave(nkmp->common.lock, flags);
673    
674     diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
675     index 830d1c87fa7c..dc87866233b9 100644
676     --- a/drivers/clk/tegra/clk-pll.c
677     +++ b/drivers/clk/tegra/clk-pll.c
678     @@ -662,8 +662,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
679     pll_override_writel(val, params->pmc_divp_reg, pll);
680    
681     val = pll_override_readl(params->pmc_divnm_reg, pll);
682     - val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
683     - ~(divn_mask(pll) << div_nmp->override_divn_shift);
684     + val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
685     + (divn_mask(pll) << div_nmp->override_divn_shift));
686     val |= (cfg->m << div_nmp->override_divm_shift) |
687     (cfg->n << div_nmp->override_divn_shift);
688     pll_override_writel(val, params->pmc_divnm_reg, pll);
689     diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
690     index ba7aaf421f36..8ff326c0c406 100644
691     --- a/drivers/hwtracing/intel_th/msu.c
692     +++ b/drivers/hwtracing/intel_th/msu.c
693     @@ -84,6 +84,7 @@ struct msc_iter {
694     * @reg_base: register window base address
695     * @thdev: intel_th_device pointer
696     * @win_list: list of windows in multiblock mode
697     + * @single_sgt: single mode buffer
698     * @nr_pages: total number of pages allocated for this buffer
699     * @single_sz: amount of data in single mode
700     * @single_wrap: single mode wrap occurred
701     @@ -104,6 +105,7 @@ struct msc {
702     struct intel_th_device *thdev;
703    
704     struct list_head win_list;
705     + struct sg_table single_sgt;
706     unsigned long nr_pages;
707     unsigned long single_sz;
708     unsigned int single_wrap : 1;
709     @@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
710     */
711     static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
712     {
713     + unsigned long nr_pages = size >> PAGE_SHIFT;
714     unsigned int order = get_order(size);
715     struct page *page;
716     + int ret;
717    
718     if (!size)
719     return 0;
720    
721     + ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
722     + if (ret)
723     + goto err_out;
724     +
725     + ret = -ENOMEM;
726     page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
727     if (!page)
728     - return -ENOMEM;
729     + goto err_free_sgt;
730    
731     split_page(page, order);
732     - msc->nr_pages = size >> PAGE_SHIFT;
733     + sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
734     +
735     + ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
736     + DMA_FROM_DEVICE);
737     + if (ret < 0)
738     + goto err_free_pages;
739     +
740     + msc->nr_pages = nr_pages;
741     msc->base = page_address(page);
742     - msc->base_addr = page_to_phys(page);
743     + msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
744    
745     return 0;
746     +
747     +err_free_pages:
748     + __free_pages(page, order);
749     +
750     +err_free_sgt:
751     + sg_free_table(&msc->single_sgt);
752     +
753     +err_out:
754     + return ret;
755     }
756    
757     /**
758     @@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
759     {
760     unsigned long off;
761    
762     + dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
763     + 1, DMA_FROM_DEVICE);
764     + sg_free_table(&msc->single_sgt);
765     +
766     for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
767     struct page *page = virt_to_page(msc->base + off);
768    
769     diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
770     index 9d55e104400c..9ec9197edffa 100644
771     --- a/drivers/hwtracing/stm/core.c
772     +++ b/drivers/hwtracing/stm/core.c
773     @@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
774     static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
775     {
776     struct stp_master *master;
777     - size_t size;
778    
779     - size = ALIGN(stm->data->sw_nchannels, 8) / 8;
780     - size += sizeof(struct stp_master);
781     - master = kzalloc(size, GFP_ATOMIC);
782     + master = kzalloc(struct_size(master, chan_map,
783     + BITS_TO_LONGS(stm->data->sw_nchannels)),
784     + GFP_ATOMIC);
785     if (!master)
786     return -ENOMEM;
787    
788     @@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
789     bitmap_release_region(&master->chan_map[0], output->channel,
790     ilog2(output->nr_chans));
791    
792     - output->nr_chans = 0;
793     master->nr_free += output->nr_chans;
794     + output->nr_chans = 0;
795     }
796    
797     /*
798     diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
799     index 0d03341317c4..121d3cb7ddd1 100644
800     --- a/drivers/iommu/tegra-smmu.c
801     +++ b/drivers/iommu/tegra-smmu.c
802     @@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
803     #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
804     #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
805     #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
806     -#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
807     #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
808     SMMU_TLB_FLUSH_VA_MATCH_SECTION)
809     #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
810     @@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
811     {
812     u32 value;
813    
814     - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
815     - SMMU_TLB_FLUSH_VA_MATCH_ALL;
816     + if (smmu->soc->num_asids == 4)
817     + value = (asid & 0x3) << 29;
818     + else
819     + value = (asid & 0x7f) << 24;
820     +
821     + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
822     smmu_writel(smmu, value, SMMU_TLB_FLUSH);
823     }
824    
825     @@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
826     {
827     u32 value;
828    
829     - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
830     - SMMU_TLB_FLUSH_VA_SECTION(iova);
831     + if (smmu->soc->num_asids == 4)
832     + value = (asid & 0x3) << 29;
833     + else
834     + value = (asid & 0x7f) << 24;
835     +
836     + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
837     smmu_writel(smmu, value, SMMU_TLB_FLUSH);
838     }
839    
840     @@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
841     {
842     u32 value;
843    
844     - value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
845     - SMMU_TLB_FLUSH_VA_GROUP(iova);
846     + if (smmu->soc->num_asids == 4)
847     + value = (asid & 0x3) << 29;
848     + else
849     + value = (asid & 0x7f) << 24;
850     +
851     + value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
852     smmu_writel(smmu, value, SMMU_TLB_FLUSH);
853     }
854    
855     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
856     index 6fc93834da44..151aa95775be 100644
857     --- a/drivers/md/dm-cache-metadata.c
858     +++ b/drivers/md/dm-cache-metadata.c
859     @@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
860     if (r)
861     return r;
862    
863     - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
864     + for (b = 0; ; b++) {
865     r = fn(context, cmd->discard_block_size, to_dblock(b),
866     dm_bitset_cursor_get_value(&c));
867     if (r)
868     break;
869     +
870     + if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
871     + break;
872     +
873     + r = dm_bitset_cursor_next(&c);
874     + if (r)
875     + break;
876     }
877    
878     dm_bitset_cursor_end(&c);
879     diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
880     index fddffe251bf6..f496213f8b67 100644
881     --- a/drivers/md/dm-delay.c
882     +++ b/drivers/md/dm-delay.c
883     @@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
884     {
885     struct delay_c *dc = ti->private;
886    
887     - destroy_workqueue(dc->kdelayd_wq);
888     + if (dc->kdelayd_wq)
889     + destroy_workqueue(dc->kdelayd_wq);
890    
891     if (dc->read.dev)
892     dm_put_device(ti, dc->read.dev);
893     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
894     index bc6ef2303f0b..dbdcc543832d 100644
895     --- a/drivers/md/dm-integrity.c
896     +++ b/drivers/md/dm-integrity.c
897     @@ -2557,7 +2557,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
898     if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
899     return -EINVAL;
900     } else {
901     - __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
902     + __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
903     meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
904     >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
905     meta_size <<= ic->log2_buffer_sectors;
906     @@ -3428,7 +3428,7 @@ try_smaller_buffer:
907     DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
908     DEBUG_print(" journal_entries %u\n", ic->journal_entries);
909     DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
910     - DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
911     + DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
912     DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
913     DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
914     DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
915     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
916     index 419362c2d8ac..baa966e2778c 100644
917     --- a/drivers/md/dm-mpath.c
918     +++ b/drivers/md/dm-mpath.c
919     @@ -892,6 +892,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
920     if (attached_handler_name || m->hw_handler_name) {
921     INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
922     r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
923     + kfree(attached_handler_name);
924     if (r) {
925     dm_put_device(ti, p->path.dev);
926     goto bad;
927     @@ -906,7 +907,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
928    
929     return p;
930     bad:
931     - kfree(attached_handler_name);
932     free_pgpath(p);
933     return ERR_PTR(r);
934     }
935     diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
936     index fa68336560c3..d8334cd45d7c 100644
937     --- a/drivers/md/dm-zoned-metadata.c
938     +++ b/drivers/md/dm-zoned-metadata.c
939     @@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
940     goto out;
941     }
942    
943     + if (!nr_blkz)
944     + break;
945     +
946     /* Process report */
947     for (i = 0; i < nr_blkz; i++) {
948     ret = dmz_init_zone(zmd, zone, &blkz[i]);
949     @@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
950     /* Get zone information from disk */
951     ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
952     &blkz, &nr_blkz, GFP_NOIO);
953     + if (!nr_blkz)
954     + ret = -EIO;
955     if (ret) {
956     dmz_dev_err(zmd->dev, "Get zone %u report failed",
957     dmz_id(zmd, zone));
958     diff --git a/drivers/md/md.c b/drivers/md/md.c
959     index 8668793262d0..b924f62e2cd5 100644
960     --- a/drivers/md/md.c
961     +++ b/drivers/md/md.c
962     @@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev)
963     mddev->sync_speed_max : sysctl_speed_limit_max;
964     }
965    
966     -static void * flush_info_alloc(gfp_t gfp_flags, void *data)
967     -{
968     - return kzalloc(sizeof(struct flush_info), gfp_flags);
969     -}
970     -static void flush_info_free(void *flush_info, void *data)
971     -{
972     - kfree(flush_info);
973     -}
974     -
975     -static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
976     -{
977     - return kzalloc(sizeof(struct flush_bio), gfp_flags);
978     -}
979     -static void flush_bio_free(void *flush_bio, void *data)
980     -{
981     - kfree(flush_bio);
982     -}
983     -
984     static struct ctl_table_header *raid_table_header;
985    
986     static struct ctl_table raid_table[] = {
987     @@ -429,54 +411,31 @@ static int md_congested(void *data, int bits)
988     /*
989     * Generic flush handling for md
990     */
991     -static void submit_flushes(struct work_struct *ws)
992     -{
993     - struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
994     - struct mddev *mddev = fi->mddev;
995     - struct bio *bio = fi->bio;
996     -
997     - bio->bi_opf &= ~REQ_PREFLUSH;
998     - md_handle_request(mddev, bio);
999     -
1000     - mempool_free(fi, mddev->flush_pool);
1001     -}
1002    
1003     -static void md_end_flush(struct bio *fbio)
1004     +static void md_end_flush(struct bio *bio)
1005     {
1006     - struct flush_bio *fb = fbio->bi_private;
1007     - struct md_rdev *rdev = fb->rdev;
1008     - struct flush_info *fi = fb->fi;
1009     - struct bio *bio = fi->bio;
1010     - struct mddev *mddev = fi->mddev;
1011     + struct md_rdev *rdev = bio->bi_private;
1012     + struct mddev *mddev = rdev->mddev;
1013    
1014     rdev_dec_pending(rdev, mddev);
1015    
1016     - if (atomic_dec_and_test(&fi->flush_pending)) {
1017     - if (bio->bi_iter.bi_size == 0) {
1018     - /* an empty barrier - all done */
1019     - bio_endio(bio);
1020     - mempool_free(fi, mddev->flush_pool);
1021     - } else {
1022     - INIT_WORK(&fi->flush_work, submit_flushes);
1023     - queue_work(md_wq, &fi->flush_work);
1024     - }
1025     + if (atomic_dec_and_test(&mddev->flush_pending)) {
1026     + /* The pre-request flush has finished */
1027     + queue_work(md_wq, &mddev->flush_work);
1028     }
1029     -
1030     - mempool_free(fb, mddev->flush_bio_pool);
1031     - bio_put(fbio);
1032     + bio_put(bio);
1033     }
1034    
1035     -void md_flush_request(struct mddev *mddev, struct bio *bio)
1036     +static void md_submit_flush_data(struct work_struct *ws);
1037     +
1038     +static void submit_flushes(struct work_struct *ws)
1039     {
1040     + struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1041     struct md_rdev *rdev;
1042     - struct flush_info *fi;
1043     -
1044     - fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
1045     -
1046     - fi->bio = bio;
1047     - fi->mddev = mddev;
1048     - atomic_set(&fi->flush_pending, 1);
1049    
1050     + mddev->start_flush = ktime_get_boottime();
1051     + INIT_WORK(&mddev->flush_work, md_submit_flush_data);
1052     + atomic_set(&mddev->flush_pending, 1);
1053     rcu_read_lock();
1054     rdev_for_each_rcu(rdev, mddev)
1055     if (rdev->raid_disk >= 0 &&
1056     @@ -486,37 +445,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
1057     * we reclaim rcu_read_lock
1058     */
1059     struct bio *bi;
1060     - struct flush_bio *fb;
1061     atomic_inc(&rdev->nr_pending);
1062     atomic_inc(&rdev->nr_pending);
1063     rcu_read_unlock();
1064     -
1065     - fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
1066     - fb->fi = fi;
1067     - fb->rdev = rdev;
1068     -
1069     bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
1070     - bio_set_dev(bi, rdev->bdev);
1071     bi->bi_end_io = md_end_flush;
1072     - bi->bi_private = fb;
1073     + bi->bi_private = rdev;
1074     + bio_set_dev(bi, rdev->bdev);
1075     bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1076     -
1077     - atomic_inc(&fi->flush_pending);
1078     + atomic_inc(&mddev->flush_pending);
1079     submit_bio(bi);
1080     -
1081     rcu_read_lock();
1082     rdev_dec_pending(rdev, mddev);
1083     }
1084     rcu_read_unlock();
1085     + if (atomic_dec_and_test(&mddev->flush_pending))
1086     + queue_work(md_wq, &mddev->flush_work);
1087     +}
1088     +
1089     +static void md_submit_flush_data(struct work_struct *ws)
1090     +{
1091     + struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1092     + struct bio *bio = mddev->flush_bio;
1093     +
1094     + /*
1095     + * must reset flush_bio before calling into md_handle_request to avoid a
1096     + * deadlock, because other bios passed md_handle_request suspend check
1097     + * could wait for this and below md_handle_request could wait for those
1098     + * bios because of suspend check
1099     + */
1100     + mddev->last_flush = mddev->start_flush;
1101     + mddev->flush_bio = NULL;
1102     + wake_up(&mddev->sb_wait);
1103    
1104     - if (atomic_dec_and_test(&fi->flush_pending)) {
1105     - if (bio->bi_iter.bi_size == 0) {
1106     + if (bio->bi_iter.bi_size == 0) {
1107     + /* an empty barrier - all done */
1108     + bio_endio(bio);
1109     + } else {
1110     + bio->bi_opf &= ~REQ_PREFLUSH;
1111     + md_handle_request(mddev, bio);
1112     + }
1113     +}
1114     +
1115     +void md_flush_request(struct mddev *mddev, struct bio *bio)
1116     +{
1117     + ktime_t start = ktime_get_boottime();
1118     + spin_lock_irq(&mddev->lock);
1119     + wait_event_lock_irq(mddev->sb_wait,
1120     + !mddev->flush_bio ||
1121     + ktime_after(mddev->last_flush, start),
1122     + mddev->lock);
1123     + if (!ktime_after(mddev->last_flush, start)) {
1124     + WARN_ON(mddev->flush_bio);
1125     + mddev->flush_bio = bio;
1126     + bio = NULL;
1127     + }
1128     + spin_unlock_irq(&mddev->lock);
1129     +
1130     + if (!bio) {
1131     + INIT_WORK(&mddev->flush_work, submit_flushes);
1132     + queue_work(md_wq, &mddev->flush_work);
1133     + } else {
1134     + /* flush was performed for some other bio while we waited. */
1135     + if (bio->bi_iter.bi_size == 0)
1136     /* an empty barrier - all done */
1137     bio_endio(bio);
1138     - mempool_free(fi, mddev->flush_pool);
1139     - } else {
1140     - INIT_WORK(&fi->flush_work, submit_flushes);
1141     - queue_work(md_wq, &fi->flush_work);
1142     + else {
1143     + bio->bi_opf &= ~REQ_PREFLUSH;
1144     + mddev->pers->make_request(mddev, bio);
1145     }
1146     }
1147     }
1148     @@ -566,6 +562,7 @@ void mddev_init(struct mddev *mddev)
1149     atomic_set(&mddev->openers, 0);
1150     atomic_set(&mddev->active_io, 0);
1151     spin_lock_init(&mddev->lock);
1152     + atomic_set(&mddev->flush_pending, 0);
1153     init_waitqueue_head(&mddev->sb_wait);
1154     init_waitqueue_head(&mddev->recovery_wait);
1155     mddev->reshape_position = MaxSector;
1156     @@ -2863,8 +2860,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
1157     err = 0;
1158     }
1159     } else if (cmd_match(buf, "re-add")) {
1160     - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1161     - rdev->saved_raid_disk >= 0) {
1162     + if (!rdev->mddev->pers)
1163     + err = -EINVAL;
1164     + else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1165     + rdev->saved_raid_disk >= 0) {
1166     /* clear_bit is performed _after_ all the devices
1167     * have their local Faulty bit cleared. If any writes
1168     * happen in the meantime in the local node, they
1169     @@ -5519,22 +5518,6 @@ int md_run(struct mddev *mddev)
1170     if (err)
1171     return err;
1172     }
1173     - if (mddev->flush_pool == NULL) {
1174     - mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
1175     - flush_info_free, mddev);
1176     - if (!mddev->flush_pool) {
1177     - err = -ENOMEM;
1178     - goto abort;
1179     - }
1180     - }
1181     - if (mddev->flush_bio_pool == NULL) {
1182     - mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
1183     - flush_bio_free, mddev);
1184     - if (!mddev->flush_bio_pool) {
1185     - err = -ENOMEM;
1186     - goto abort;
1187     - }
1188     - }
1189    
1190     spin_lock(&pers_lock);
1191     pers = find_pers(mddev->level, mddev->clevel);
1192     @@ -5694,15 +5677,8 @@ int md_run(struct mddev *mddev)
1193     return 0;
1194    
1195     abort:
1196     - if (mddev->flush_bio_pool) {
1197     - mempool_destroy(mddev->flush_bio_pool);
1198     - mddev->flush_bio_pool = NULL;
1199     - }
1200     - if (mddev->flush_pool){
1201     - mempool_destroy(mddev->flush_pool);
1202     - mddev->flush_pool = NULL;
1203     - }
1204     -
1205     + bioset_exit(&mddev->bio_set);
1206     + bioset_exit(&mddev->sync_set);
1207     return err;
1208     }
1209     EXPORT_SYMBOL_GPL(md_run);
1210     @@ -5906,14 +5882,6 @@ static void __md_stop(struct mddev *mddev)
1211     mddev->to_remove = &md_redundancy_group;
1212     module_put(pers->owner);
1213     clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1214     - if (mddev->flush_bio_pool) {
1215     - mempool_destroy(mddev->flush_bio_pool);
1216     - mddev->flush_bio_pool = NULL;
1217     - }
1218     - if (mddev->flush_pool) {
1219     - mempool_destroy(mddev->flush_pool);
1220     - mddev->flush_pool = NULL;
1221     - }
1222     }
1223    
1224     void md_stop(struct mddev *mddev)
1225     diff --git a/drivers/md/md.h b/drivers/md/md.h
1226     index 8afd6bfdbfb9..325cb2136a49 100644
1227     --- a/drivers/md/md.h
1228     +++ b/drivers/md/md.h
1229     @@ -252,19 +252,6 @@ enum mddev_sb_flags {
1230     MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
1231     };
1232    
1233     -#define NR_FLUSH_INFOS 8
1234     -#define NR_FLUSH_BIOS 64
1235     -struct flush_info {
1236     - struct bio *bio;
1237     - struct mddev *mddev;
1238     - struct work_struct flush_work;
1239     - atomic_t flush_pending;
1240     -};
1241     -struct flush_bio {
1242     - struct flush_info *fi;
1243     - struct md_rdev *rdev;
1244     -};
1245     -
1246     struct mddev {
1247     void *private;
1248     struct md_personality *pers;
1249     @@ -470,8 +457,16 @@ struct mddev {
1250     * metadata and bitmap writes
1251     */
1252    
1253     - mempool_t *flush_pool;
1254     - mempool_t *flush_bio_pool;
1255     + /* Generic flush handling.
1256     + * The last to finish preflush schedules a worker to submit
1257     + * the rest of the request (without the REQ_PREFLUSH flag).
1258     + */
1259     + struct bio *flush_bio;
1260     + atomic_t flush_pending;
1261     + ktime_t start_flush, last_flush; /* last_flush is when the last completed
1262     + * flush was started.
1263     + */
1264     + struct work_struct flush_work;
1265     struct work_struct event_work; /* used by dm to report failure event */
1266     void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
1267     struct md_cluster_info *cluster_info;
1268     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1269     index 828d86605fb6..f237d6f30752 100644
1270     --- a/drivers/md/raid5.c
1271     +++ b/drivers/md/raid5.c
1272     @@ -4185,7 +4185,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1273     /* now write out any block on a failed drive,
1274     * or P or Q if they were recomputed
1275     */
1276     - BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
1277     + dev = NULL;
1278     if (s->failed == 2) {
1279     dev = &sh->dev[s->failed_num[1]];
1280     s->locked++;
1281     @@ -4210,6 +4210,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1282     set_bit(R5_LOCKED, &dev->flags);
1283     set_bit(R5_Wantwrite, &dev->flags);
1284     }
1285     + if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
1286     + "%s: disk%td not up to date\n",
1287     + mdname(conf->mddev),
1288     + dev - (struct r5dev *) &sh->dev)) {
1289     + clear_bit(R5_LOCKED, &dev->flags);
1290     + clear_bit(R5_Wantwrite, &dev->flags);
1291     + s->locked--;
1292     + }
1293     clear_bit(STRIPE_DEGRADED, &sh->state);
1294    
1295     set_bit(STRIPE_INSYNC, &sh->state);
1296     @@ -4221,15 +4229,26 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1297     case check_state_check_result:
1298     sh->check_state = check_state_idle;
1299    
1300     - if (s->failed > 1)
1301     - break;
1302     /* handle a successful check operation, if parity is correct
1303     * we are done. Otherwise update the mismatch count and repair
1304     * parity if !MD_RECOVERY_CHECK
1305     */
1306     if (sh->ops.zero_sum_result == 0) {
1307     - /* Any parity checked was correct */
1308     - set_bit(STRIPE_INSYNC, &sh->state);
1309     + /* both parities are correct */
1310     + if (!s->failed)
1311     + set_bit(STRIPE_INSYNC, &sh->state);
1312     + else {
1313     + /* in contrast to the raid5 case we can validate
1314     + * parity, but still have a failure to write
1315     + * back
1316     + */
1317     + sh->check_state = check_state_compute_result;
1318     + /* Returning at this point means that we may go
1319     + * off and bring p and/or q uptodate again so
1320     + * we make sure to check zero_sum_result again
1321     + * to verify if p or q need writeback
1322     + */
1323     + }
1324     } else {
1325     atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
1326     if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
1327     diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
1328     index 17a34b4a819d..a9264d515e54 100644
1329     --- a/drivers/media/i2c/ov6650.c
1330     +++ b/drivers/media/i2c/ov6650.c
1331     @@ -815,6 +815,8 @@ static int ov6650_video_probe(struct i2c_client *client)
1332     if (ret < 0)
1333     return ret;
1334    
1335     + msleep(20);
1336     +
1337     /*
1338     * check and show product ID and manufacturer ID
1339     */
1340     diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
1341     index bd25faf6d13d..c8f16666256c 100644
1342     --- a/drivers/memory/tegra/mc.c
1343     +++ b/drivers/memory/tegra/mc.c
1344     @@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
1345     u32 value;
1346    
1347     /* compute the number of MC clock cycles per tick */
1348     - tick = mc->tick * clk_get_rate(mc->clk);
1349     + tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
1350     do_div(tick, NSEC_PER_SEC);
1351    
1352     value = readl(mc->regs + MC_EMEM_ARB_CFG);
1353     diff --git a/drivers/net/Makefile b/drivers/net/Makefile
1354     index 21cde7e78621..0d3ba056cda3 100644
1355     --- a/drivers/net/Makefile
1356     +++ b/drivers/net/Makefile
1357     @@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/
1358     obj-$(CONFIG_DEV_APPLETALK) += appletalk/
1359     obj-$(CONFIG_CAIF) += caif/
1360     obj-$(CONFIG_CAN) += can/
1361     -obj-$(CONFIG_NET_DSA) += dsa/
1362     +obj-y += dsa/
1363     obj-$(CONFIG_ETHERNET) += ethernet/
1364     obj-$(CONFIG_FDDI) += fddi/
1365     obj-$(CONFIG_HIPPI) += hippi/
1366     diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1367     index ffed2d4c9403..9c481823b3e8 100644
1368     --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
1369     +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1370     @@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1371     rule.port = port;
1372     rule.qpn = qpn;
1373     INIT_LIST_HEAD(&rule.list);
1374     - mlx4_err(dev, "going promisc on %x\n", port);
1375     + mlx4_info(dev, "going promisc on %x\n", port);
1376    
1377     return mlx4_flow_attach(dev, &rule, regid_p);
1378     }
1379     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1380     index 37a551436e4a..b7e3b8902e7e 100644
1381     --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1382     +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1383     @@ -8,6 +8,7 @@ config MLX5_CORE
1384     depends on PCI
1385     imply PTP_1588_CLOCK
1386     imply VXLAN
1387     + imply MLXFW
1388     default n
1389     ---help---
1390     Core driver for low level functionality of the ConnectX-4 and
1391     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1392     index 9ca4f88d7cf6..792bb8bc0cd3 100644
1393     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1394     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1395     @@ -1609,6 +1609,22 @@ static int mlx5e_flash_device(struct net_device *dev,
1396     return mlx5e_ethtool_flash_device(priv, flash);
1397     }
1398    
1399     +#ifndef CONFIG_MLX5_EN_RXNFC
1400     +/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
1401     + * otherwise this function will be defined from en_fs_ethtool.c
1402     + */
1403     +static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
1404     +{
1405     + struct mlx5e_priv *priv = netdev_priv(dev);
1406     +
1407     + if (info->cmd != ETHTOOL_GRXRINGS)
1408     + return -EOPNOTSUPP;
1409     + /* ring_count is needed by ethtool -x */
1410     + info->data = priv->channels.params.num_channels;
1411     + return 0;
1412     +}
1413     +#endif
1414     +
1415     const struct ethtool_ops mlx5e_ethtool_ops = {
1416     .get_drvinfo = mlx5e_get_drvinfo,
1417     .get_link = ethtool_op_get_link,
1418     @@ -1627,8 +1643,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1419     .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
1420     .get_rxfh = mlx5e_get_rxfh,
1421     .set_rxfh = mlx5e_set_rxfh,
1422     -#ifdef CONFIG_MLX5_EN_RXNFC
1423     .get_rxnfc = mlx5e_get_rxnfc,
1424     +#ifdef CONFIG_MLX5_EN_RXNFC
1425     .set_rxnfc = mlx5e_set_rxnfc,
1426     #endif
1427     .flash_device = mlx5e_flash_device,
1428     diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1429     index 382bb93cb090..ff5c74120c12 100644
1430     --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1431     +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1432     @@ -194,6 +194,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
1433     return;
1434     }
1435    
1436     + rcu_read_lock();
1437     for (i = 0; i < count; i++) {
1438     ipv4_addr = payload->tun_info[i].ipv4;
1439     port = be32_to_cpu(payload->tun_info[i].egress_port);
1440     @@ -209,6 +210,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
1441     neigh_event_send(n, NULL);
1442     neigh_release(n);
1443     }
1444     + rcu_read_unlock();
1445     }
1446    
1447     static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
1448     @@ -404,9 +406,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
1449    
1450     payload = nfp_flower_cmsg_get_data(skb);
1451    
1452     + rcu_read_lock();
1453     netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
1454     if (!netdev)
1455     - goto route_fail_warning;
1456     + goto fail_rcu_unlock;
1457    
1458     flow.daddr = payload->ipv4_addr;
1459     flow.flowi4_proto = IPPROTO_UDP;
1460     @@ -416,21 +419,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
1461     rt = ip_route_output_key(dev_net(netdev), &flow);
1462     err = PTR_ERR_OR_ZERO(rt);
1463     if (err)
1464     - goto route_fail_warning;
1465     + goto fail_rcu_unlock;
1466     #else
1467     - goto route_fail_warning;
1468     + goto fail_rcu_unlock;
1469     #endif
1470    
1471     /* Get the neighbour entry for the lookup */
1472     n = dst_neigh_lookup(&rt->dst, &flow.daddr);
1473     ip_rt_put(rt);
1474     if (!n)
1475     - goto route_fail_warning;
1476     - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
1477     + goto fail_rcu_unlock;
1478     + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
1479     neigh_release(n);
1480     + rcu_read_unlock();
1481     return;
1482    
1483     -route_fail_warning:
1484     +fail_rcu_unlock:
1485     + rcu_read_unlock();
1486     nfp_flower_cmsg_warn(app, "Requested route not found.\n");
1487     }
1488    
1489     diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
1490     index 04891429a554..fe4057fca83d 100644
1491     --- a/drivers/net/ieee802154/mcr20a.c
1492     +++ b/drivers/net/ieee802154/mcr20a.c
1493     @@ -539,6 +539,8 @@ mcr20a_start(struct ieee802154_hw *hw)
1494     dev_dbg(printdev(lp), "no slotted operation\n");
1495     ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
1496     DAR_PHY_CTRL1_SLOTTED, 0x0);
1497     + if (ret < 0)
1498     + return ret;
1499    
1500     /* enable irq */
1501     enable_irq(lp->spi->irq);
1502     @@ -546,11 +548,15 @@ mcr20a_start(struct ieee802154_hw *hw)
1503     /* Unmask SEQ interrupt */
1504     ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
1505     DAR_PHY_CTRL2_SEQMSK, 0x0);
1506     + if (ret < 0)
1507     + return ret;
1508    
1509     /* Start the RX sequence */
1510     dev_dbg(printdev(lp), "start the RX sequence\n");
1511     ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
1512     DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
1513     + if (ret < 0)
1514     + return ret;
1515    
1516     return 0;
1517     }
1518     diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
1519     index b5edc7f96a39..685e875f5164 100644
1520     --- a/drivers/net/ppp/ppp_deflate.c
1521     +++ b/drivers/net/ppp/ppp_deflate.c
1522     @@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
1523    
1524     static int __init deflate_init(void)
1525     {
1526     - int answer = ppp_register_compressor(&ppp_deflate);
1527     - if (answer == 0)
1528     - printk(KERN_INFO
1529     - "PPP Deflate Compression module registered\n");
1530     - ppp_register_compressor(&ppp_deflate_draft);
1531     - return answer;
1532     + int rc;
1533     +
1534     + rc = ppp_register_compressor(&ppp_deflate);
1535     + if (rc)
1536     + return rc;
1537     +
1538     + rc = ppp_register_compressor(&ppp_deflate_draft);
1539     + if (rc) {
1540     + ppp_unregister_compressor(&ppp_deflate);
1541     + return rc;
1542     + }
1543     +
1544     + pr_info("PPP Deflate Compression module registered\n");
1545     + return 0;
1546     }
1547    
1548     static void __exit deflate_cleanup(void)
1549     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1550     index 9195f3476b1d..366217263d70 100644
1551     --- a/drivers/net/usb/qmi_wwan.c
1552     +++ b/drivers/net/usb/qmi_wwan.c
1553     @@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
1554     {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
1555     {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1556     {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1557     + {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
1558     + {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
1559     + {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
1560     + {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
1561     + {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
1562     {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1563     {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1564     {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1565     + {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
1566     + {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
1567     {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
1568     {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
1569     {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1570     @@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
1571     {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
1572     {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
1573     {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
1574     + {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
1575     {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
1576     {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
1577     {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
1578     @@ -1200,7 +1208,9 @@ static const struct usb_device_id products[] = {
1579     {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
1580     {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
1581     {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
1582     + {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
1583     {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1584     + {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
1585     {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1586     {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1587     {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1588     @@ -1240,6 +1250,8 @@ static const struct usb_device_id products[] = {
1589     {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1590     {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1591     {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1592     + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
1593     + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
1594     {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
1595     {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
1596     {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
1597     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1598     index b53148f972a4..036d1d82d93e 100644
1599     --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1600     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
1601     @@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
1602     }
1603    
1604     /* iwl_mvm_create_skb Adds the rxb to a new skb */
1605     -static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
1606     - u16 len, u8 crypt_len,
1607     - struct iwl_rx_cmd_buffer *rxb)
1608     +static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
1609     + struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
1610     + struct iwl_rx_cmd_buffer *rxb)
1611     {
1612     struct iwl_rx_packet *pkt = rxb_addr(rxb);
1613     struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
1614     @@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
1615     * present before copying packet data.
1616     */
1617     hdrlen += crypt_len;
1618     +
1619     + if (WARN_ONCE(headlen < hdrlen,
1620     + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
1621     + hdrlen, len, crypt_len)) {
1622     + /*
1623     + * We warn and trace because we want to be able to see
1624     + * it in trace-cmd as well.
1625     + */
1626     + IWL_DEBUG_RX(mvm,
1627     + "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
1628     + hdrlen, len, crypt_len);
1629     + return -EINVAL;
1630     + }
1631     +
1632     skb_put_data(skb, hdr, hdrlen);
1633     skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
1634    
1635     @@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
1636     skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
1637     fraglen, rxb->truesize);
1638     }
1639     +
1640     + return 0;
1641     }
1642    
1643     /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
1644     @@ -1425,7 +1441,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1645     rx_status->boottime_ns = ktime_get_boot_ns();
1646     }
1647    
1648     - iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
1649     + if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
1650     + kfree_skb(skb);
1651     + goto out;
1652     + }
1653     +
1654     if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
1655     iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
1656     out:
1657     diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
1658     index 27a49068d32d..57ad56435dda 100644
1659     --- a/drivers/net/wireless/intersil/p54/p54pci.c
1660     +++ b/drivers/net/wireless/intersil/p54/p54pci.c
1661     @@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
1662     err = pci_enable_device(pdev);
1663     if (err) {
1664     dev_err(&pdev->dev, "Cannot enable new PCI device\n");
1665     - return err;
1666     + goto err_put;
1667     }
1668    
1669     mem_addr = pci_resource_start(pdev, 0);
1670     @@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
1671     pci_release_regions(pdev);
1672     err_disable_dev:
1673     pci_disable_device(pdev);
1674     +err_put:
1675     pci_dev_put(pdev);
1676     return err;
1677     }
1678     diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
1679     index 0c6e8b44b4ed..c60b465f6fe4 100644
1680     --- a/drivers/parisc/led.c
1681     +++ b/drivers/parisc/led.c
1682     @@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
1683     break;
1684    
1685     case DISPLAY_MODEL_LASI:
1686     + /* Skip LED registration when running on QEMU */
1687     + if (running_on_qemu)
1688     + return 1;
1689     LED_DATA_REG = data_reg;
1690     led_func_ptr = led_LASI_driver;
1691     printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
1692     diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
1693     index c8febb009454..6a4e435bd35f 100644
1694     --- a/drivers/pci/controller/pcie-rcar.c
1695     +++ b/drivers/pci/controller/pcie-rcar.c
1696     @@ -46,6 +46,7 @@
1697    
1698     /* Transfer control */
1699     #define PCIETCTLR 0x02000
1700     +#define DL_DOWN BIT(3)
1701     #define CFINIT 1
1702     #define PCIETSTR 0x02004
1703     #define DATA_LINK_ACTIVE 1
1704     @@ -94,6 +95,7 @@
1705     #define MACCTLR 0x011058
1706     #define SPEED_CHANGE BIT(24)
1707     #define SCRAMBLE_DISABLE BIT(27)
1708     +#define PMSR 0x01105c
1709     #define MACS2R 0x011078
1710     #define MACCGSPSETR 0x011084
1711     #define SPCNGRSN BIT(31)
1712     @@ -1130,6 +1132,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1713     pcie = pci_host_bridge_priv(bridge);
1714    
1715     pcie->dev = dev;
1716     + platform_set_drvdata(pdev, pcie);
1717    
1718     err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
1719     if (err)
1720     @@ -1221,10 +1224,28 @@ err_free_bridge:
1721     return err;
1722     }
1723    
1724     +static int rcar_pcie_resume_noirq(struct device *dev)
1725     +{
1726     + struct rcar_pcie *pcie = dev_get_drvdata(dev);
1727     +
1728     + if (rcar_pci_read_reg(pcie, PMSR) &&
1729     + !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
1730     + return 0;
1731     +
1732     + /* Re-establish the PCIe link */
1733     + rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
1734     + return rcar_pcie_wait_for_dl(pcie);
1735     +}
1736     +
1737     +static const struct dev_pm_ops rcar_pcie_pm_ops = {
1738     + .resume_noirq = rcar_pcie_resume_noirq,
1739     +};
1740     +
1741     static struct platform_driver rcar_pcie_driver = {
1742     .driver = {
1743     .name = "rcar-pcie",
1744     .of_match_table = rcar_pcie_of_match,
1745     + .pm = &rcar_pcie_pm_ops,
1746     .suppress_bind_attrs = true,
1747     },
1748     .probe = rcar_pcie_probe,
1749     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1750     index 30649addc625..61f2ef28ea1c 100644
1751     --- a/drivers/pci/pci.c
1752     +++ b/drivers/pci/pci.c
1753     @@ -6135,8 +6135,7 @@ static int __init pci_setup(char *str)
1754     } else if (!strncmp(str, "pcie_scan_all", 13)) {
1755     pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
1756     } else if (!strncmp(str, "disable_acs_redir=", 18)) {
1757     - disable_acs_redir_param =
1758     - kstrdup(str + 18, GFP_KERNEL);
1759     + disable_acs_redir_param = str + 18;
1760     } else {
1761     printk(KERN_ERR "PCI: Unknown option `%s'\n",
1762     str);
1763     @@ -6147,3 +6146,19 @@ static int __init pci_setup(char *str)
1764     return 0;
1765     }
1766     early_param("pci", pci_setup);
1767     +
1768     +/*
1769     + * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
1770     + * to data in the __initdata section which will be freed after the init
1771     + * sequence is complete. We can't allocate memory in pci_setup() because some
1772     + * architectures do not have any memory allocation service available during
1773     + * an early_param() call. So we allocate memory and copy the variable here
1774     + * before the init section is freed.
1775     + */
1776     +static int __init pci_realloc_setup_params(void)
1777     +{
1778     + disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
1779     +
1780     + return 0;
1781     +}
1782     +pure_initcall(pci_realloc_setup_params);
1783     diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
1784     index 6e0d1528d471..ab25752f00d9 100644
1785     --- a/drivers/pci/pci.h
1786     +++ b/drivers/pci/pci.h
1787     @@ -530,7 +530,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
1788     void pci_aer_clear_device_status(struct pci_dev *dev);
1789     #else
1790     static inline void pci_no_aer(void) { }
1791     -static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
1792     +static inline void pci_aer_init(struct pci_dev *d) { }
1793     static inline void pci_aer_exit(struct pci_dev *d) { }
1794     static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
1795     static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
1796     diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
1797     index f78860ce884b..1117b25fbe0b 100644
1798     --- a/drivers/pci/pcie/aspm.c
1799     +++ b/drivers/pci/pcie/aspm.c
1800     @@ -198,6 +198,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
1801     link->clkpm_capable = (blacklist) ? 0 : capable;
1802     }
1803    
1804     +static bool pcie_retrain_link(struct pcie_link_state *link)
1805     +{
1806     + struct pci_dev *parent = link->pdev;
1807     + unsigned long start_jiffies;
1808     + u16 reg16;
1809     +
1810     + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
1811     + reg16 |= PCI_EXP_LNKCTL_RL;
1812     + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
1813     + if (parent->clear_retrain_link) {
1814     + /*
1815     + * Due to an erratum in some devices the Retrain Link bit
1816     + * needs to be cleared again manually to allow the link
1817     + * training to succeed.
1818     + */
1819     + reg16 &= ~PCI_EXP_LNKCTL_RL;
1820     + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
1821     + }
1822     +
1823     + /* Wait for link training end. Break out after waiting for timeout */
1824     + start_jiffies = jiffies;
1825     + for (;;) {
1826     + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
1827     + if (!(reg16 & PCI_EXP_LNKSTA_LT))
1828     + break;
1829     + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
1830     + break;
1831     + msleep(1);
1832     + }
1833     + return !(reg16 & PCI_EXP_LNKSTA_LT);
1834     +}
1835     +
1836     /*
1837     * pcie_aspm_configure_common_clock: check if the 2 ends of a link
1838     * could use common clock. If they are, configure them to use the
1839     @@ -207,7 +239,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
1840     {
1841     int same_clock = 1;
1842     u16 reg16, parent_reg, child_reg[8];
1843     - unsigned long start_jiffies;
1844     struct pci_dev *child, *parent = link->pdev;
1845     struct pci_bus *linkbus = parent->subordinate;
1846     /*
1847     @@ -265,21 +296,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
1848     reg16 &= ~PCI_EXP_LNKCTL_CCC;
1849     pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
1850    
1851     - /* Retrain link */
1852     - reg16 |= PCI_EXP_LNKCTL_RL;
1853     - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
1854     -
1855     - /* Wait for link training end. Break out after waiting for timeout */
1856     - start_jiffies = jiffies;
1857     - for (;;) {
1858     - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
1859     - if (!(reg16 & PCI_EXP_LNKSTA_LT))
1860     - break;
1861     - if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
1862     - break;
1863     - msleep(1);
1864     - }
1865     - if (!(reg16 & PCI_EXP_LNKSTA_LT))
1866     + if (pcie_retrain_link(link))
1867     return;
1868    
1869     /* Training failed. Restore common clock configurations */
1870     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
1871     index 4a4c16bfc0d3..fa4c386c8cd8 100644
1872     --- a/drivers/pci/probe.c
1873     +++ b/drivers/pci/probe.c
1874     @@ -535,16 +535,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
1875     kfree(to_pci_host_bridge(dev));
1876     }
1877    
1878     -struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
1879     +static void pci_init_host_bridge(struct pci_host_bridge *bridge)
1880     {
1881     - struct pci_host_bridge *bridge;
1882     -
1883     - bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
1884     - if (!bridge)
1885     - return NULL;
1886     -
1887     INIT_LIST_HEAD(&bridge->windows);
1888     - bridge->dev.release = pci_release_host_bridge_dev;
1889    
1890     /*
1891     * We assume we can manage these PCIe features. Some systems may
1892     @@ -557,6 +550,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
1893     bridge->native_shpc_hotplug = 1;
1894     bridge->native_pme = 1;
1895     bridge->native_ltr = 1;
1896     +}
1897     +
1898     +struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
1899     +{
1900     + struct pci_host_bridge *bridge;
1901     +
1902     + bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
1903     + if (!bridge)
1904     + return NULL;
1905     +
1906     + pci_init_host_bridge(bridge);
1907     + bridge->dev.release = pci_release_host_bridge_dev;
1908    
1909     return bridge;
1910     }
1911     @@ -571,7 +576,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
1912     if (!bridge)
1913     return NULL;
1914    
1915     - INIT_LIST_HEAD(&bridge->windows);
1916     + pci_init_host_bridge(bridge);
1917     bridge->dev.release = devm_pci_release_host_bridge_dev;
1918    
1919     return bridge;
1920     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1921     index 37d897bc4cf1..28c64f84bfe7 100644
1922     --- a/drivers/pci/quirks.c
1923     +++ b/drivers/pci/quirks.c
1924     @@ -2220,6 +2220,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
1925     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
1926     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
1927    
1928     +/*
1929     + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
1930     + * Link bit cleared after starting the link retrain process to allow this
1931     + * process to finish.
1932     + *
1933     + * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
1934     + * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
1935     + */
1936     +static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
1937     +{
1938     + dev->clear_retrain_link = 1;
1939     + pci_info(dev, "Enable PCIe Retrain Link quirk\n");
1940     +}
1941     +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
1942     +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
1943     +DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
1944     +
1945     static void fixup_rev1_53c810(struct pci_dev *dev)
1946     {
1947     u32 class = dev->class;
1948     @@ -3383,6 +3400,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
1949     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
1950     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
1951     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
1952     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
1953    
1954     /*
1955     * Root port on some Cavium CN8xxx chips do not successfully complete a bus
1956     @@ -4878,6 +4896,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
1957    
1958     /* AMD Stoney platform GPU */
1959     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
1960     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
1961     #endif /* CONFIG_PCI_ATS */
1962    
1963     /* Freescale PCIe doesn't support MSI in RC mode */
1964     diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
1965     index 68ce4a082b9b..693acc167351 100644
1966     --- a/drivers/phy/ti/phy-ti-pipe3.c
1967     +++ b/drivers/phy/ti/phy-ti-pipe3.c
1968     @@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
1969    
1970     val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
1971     val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
1972     - val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
1973     + val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
1974     ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
1975    
1976     val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
1977     diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
1978     index 98ba07869c3b..3bae02380bb2 100644
1979     --- a/drivers/power/supply/cpcap-battery.c
1980     +++ b/drivers/power/supply/cpcap-battery.c
1981     @@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
1982     int avg_current;
1983     u32 cc_lsb;
1984    
1985     + if (!divider)
1986     + return 0;
1987     +
1988     sample &= 0xffffff; /* 24-bits, unsigned */
1989     offset &= 0x7ff; /* 10-bits, signed */
1990    
1991     diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
1992     index 6170ed8b6854..5a2757a7f408 100644
1993     --- a/drivers/power/supply/power_supply_sysfs.c
1994     +++ b/drivers/power/supply/power_supply_sysfs.c
1995     @@ -382,15 +382,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
1996     char *prop_buf;
1997     char *attrname;
1998    
1999     - dev_dbg(dev, "uevent\n");
2000     -
2001     if (!psy || !psy->desc) {
2002     dev_dbg(dev, "No power supply yet\n");
2003     return ret;
2004     }
2005    
2006     - dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
2007     -
2008     ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
2009     if (ret)
2010     return ret;
2011     @@ -426,8 +422,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
2012     goto out;
2013     }
2014    
2015     - dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
2016     -
2017     ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
2018     kfree(attrname);
2019     if (ret)
2020     diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
2021     index e22f1239a318..d17ce1fb4ef5 100644
2022     --- a/drivers/staging/media/imx/imx-media-csi.c
2023     +++ b/drivers/staging/media/imx/imx-media-csi.c
2024     @@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
2025     /*
2026     * Parses the fwnode endpoint from the source pad of the entity
2027     * connected to this CSI. This will either be the entity directly
2028     - * upstream from the CSI-2 receiver, or directly upstream from the
2029     - * video mux. The endpoint is needed to determine the bus type and
2030     - * bus config coming into the CSI.
2031     + * upstream from the CSI-2 receiver, directly upstream from the
2032     + * video mux, or directly upstream from the CSI itself. The endpoint
2033     + * is needed to determine the bus type and bus config coming into
2034     + * the CSI.
2035     */
2036     static int csi_get_upstream_endpoint(struct csi_priv *priv,
2037     struct v4l2_fwnode_endpoint *ep)
2038     @@ -168,7 +169,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2039     if (!priv->src_sd)
2040     return -EPIPE;
2041    
2042     - src = &priv->src_sd->entity;
2043     + sd = priv->src_sd;
2044     + src = &sd->entity;
2045    
2046     if (src->function == MEDIA_ENT_F_VID_MUX) {
2047     /*
2048     @@ -182,6 +184,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2049     src = &sd->entity;
2050     }
2051    
2052     + /*
2053     + * If the source is neither the video mux nor the CSI-2 receiver,
2054     + * get the source pad directly upstream from CSI itself.
2055     + */
2056     + if (src->function != MEDIA_ENT_F_VID_MUX &&
2057     + sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
2058     + src = &priv->sd.entity;
2059     +
2060     /* get source pad of entity directly upstream from src */
2061     pad = imx_media_find_upstream_pad(priv->md, src, 0);
2062     if (IS_ERR(pad))
2063     diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
2064     index acde372c6795..1647da216bf9 100644
2065     --- a/drivers/staging/media/imx/imx-media-of.c
2066     +++ b/drivers/staging/media/imx/imx-media-of.c
2067     @@ -233,15 +233,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
2068     struct v4l2_subdev *csi)
2069     {
2070     struct device_node *csi_np = csi->dev->of_node;
2071     - struct fwnode_handle *fwnode, *csi_ep;
2072     - struct v4l2_fwnode_link link;
2073     struct device_node *ep;
2074     - int ret;
2075     -
2076     - link.local_node = of_fwnode_handle(csi_np);
2077     - link.local_port = CSI_SINK_PAD;
2078    
2079     for_each_child_of_node(csi_np, ep) {
2080     + struct fwnode_handle *fwnode, *csi_ep;
2081     + struct v4l2_fwnode_link link;
2082     + int ret;
2083     +
2084     + memset(&link, 0, sizeof(link));
2085     +
2086     + link.local_node = of_fwnode_handle(csi_np);
2087     + link.local_port = CSI_SINK_PAD;
2088     +
2089     csi_ep = of_fwnode_handle(ep);
2090    
2091     fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
2092     diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2093     index ba906876cc45..fd02e8a4841d 100644
2094     --- a/drivers/video/fbdev/efifb.c
2095     +++ b/drivers/video/fbdev/efifb.c
2096     @@ -476,8 +476,12 @@ static int efifb_probe(struct platform_device *dev)
2097     * If the UEFI memory map covers the efifb region, we may only
2098     * remap it using the attributes the memory map prescribes.
2099     */
2100     - mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
2101     - mem_flags &= md.attribute;
2102     + md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
2103     + EFI_MEMORY_WT | EFI_MEMORY_WB;
2104     + if (md.attribute) {
2105     + mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
2106     + mem_flags &= md.attribute;
2107     + }
2108     }
2109     if (mem_flags & EFI_MEMORY_WC)
2110     info->screen_base = ioremap_wc(efifb_fix.smem_start,
2111     diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
2112     index aad1cc4be34a..c7ebf03b8d53 100644
2113     --- a/drivers/video/fbdev/sm712.h
2114     +++ b/drivers/video/fbdev/sm712.h
2115     @@ -15,14 +15,10 @@
2116    
2117     #define FB_ACCEL_SMI_LYNX 88
2118    
2119     -#define SCREEN_X_RES 1024
2120     -#define SCREEN_Y_RES 600
2121     -#define SCREEN_BPP 16
2122     -
2123     -/*Assume SM712 graphics chip has 4MB VRAM */
2124     -#define SM712_VIDEOMEMORYSIZE 0x00400000
2125     -/*Assume SM722 graphics chip has 8MB VRAM */
2126     -#define SM722_VIDEOMEMORYSIZE 0x00800000
2127     +#define SCREEN_X_RES 1024
2128     +#define SCREEN_Y_RES_PC 768
2129     +#define SCREEN_Y_RES_NETBOOK 600
2130     +#define SCREEN_BPP 16
2131    
2132     #define dac_reg (0x3c8)
2133     #define dac_val (0x3c9)
2134     diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
2135     index 502d0de2feec..f1dcc6766d1e 100644
2136     --- a/drivers/video/fbdev/sm712fb.c
2137     +++ b/drivers/video/fbdev/sm712fb.c
2138     @@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
2139     0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
2140     },
2141     },
2142     + { /* 1024 x 768 16Bpp 60Hz */
2143     + 1024, 768, 16, 60,
2144     + /* Init_MISC */
2145     + 0xEB,
2146     + { /* Init_SR0_SR4 */
2147     + 0x03, 0x01, 0x0F, 0x03, 0x0E,
2148     + },
2149     + { /* Init_SR10_SR24 */
2150     + 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
2151     + 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
2152     + 0xC4, 0x30, 0x02, 0x01, 0x01,
2153     + },
2154     + { /* Init_SR30_SR75 */
2155     + 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
2156     + 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
2157     + 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
2158     + 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
2159     + 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
2160     + 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
2161     + 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
2162     + 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
2163     + 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
2164     + },
2165     + { /* Init_SR80_SR93 */
2166     + 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
2167     + 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
2168     + 0x00, 0x00, 0x00, 0x00,
2169     + },
2170     + { /* Init_SRA0_SRAF */
2171     + 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
2172     + 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
2173     + },
2174     + { /* Init_GR00_GR08 */
2175     + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
2176     + 0xFF,
2177     + },
2178     + { /* Init_AR00_AR14 */
2179     + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2180     + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
2181     + 0x41, 0x00, 0x0F, 0x00, 0x00,
2182     + },
2183     + { /* Init_CR00_CR18 */
2184     + 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
2185     + 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2186     + 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
2187     + 0xFF,
2188     + },
2189     + { /* Init_CR30_CR4D */
2190     + 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
2191     + 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
2192     + 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
2193     + 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
2194     + },
2195     + { /* Init_CR90_CRA7 */
2196     + 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
2197     + 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
2198     + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
2199     + },
2200     + },
2201     { /* mode#5: 1024 x 768 24Bpp 60Hz */
2202     1024, 768, 24, 60,
2203     /* Init_MISC */
2204     @@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
2205    
2206     static int smtc_blank(int blank_mode, struct fb_info *info)
2207     {
2208     + struct smtcfb_info *sfb = info->par;
2209     +
2210     /* clear DPMS setting */
2211     switch (blank_mode) {
2212     case FB_BLANK_UNBLANK:
2213     /* Screen On: HSync: On, VSync : On */
2214     +
2215     + switch (sfb->chip_id) {
2216     + case 0x710:
2217     + case 0x712:
2218     + smtc_seqw(0x6a, 0x16);
2219     + smtc_seqw(0x6b, 0x02);
2220     + break;
2221     + case 0x720:
2222     + smtc_seqw(0x6a, 0x0d);
2223     + smtc_seqw(0x6b, 0x02);
2224     + break;
2225     + }
2226     +
2227     + smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2228     smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
2229     - smtc_seqw(0x6a, 0x16);
2230     - smtc_seqw(0x6b, 0x02);
2231     smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
2232     smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
2233     - smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2234     - smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2235     smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
2236     + smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2237     break;
2238     case FB_BLANK_NORMAL:
2239     /* Screen Off: HSync: On, VSync : On Soft blank */
2240     + smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2241     + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2242     + smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2243     smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
2244     + smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
2245     smtc_seqw(0x6a, 0x16);
2246     smtc_seqw(0x6b, 0x02);
2247     - smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
2248     - smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2249     - smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2250     - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2251     break;
2252     case FB_BLANK_VSYNC_SUSPEND:
2253     /* Screen On: HSync: On, VSync : Off */
2254     + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2255     + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2256     + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
2257     smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
2258     - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2259     - smtc_seqw(0x6a, 0x0c);
2260     - smtc_seqw(0x6b, 0x02);
2261     smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
2262     + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2263     smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
2264     - smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
2265     - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2266     - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2267     smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
2268     + smtc_seqw(0x6a, 0x0c);
2269     + smtc_seqw(0x6b, 0x02);
2270     break;
2271     case FB_BLANK_HSYNC_SUSPEND:
2272     /* Screen On: HSync: Off, VSync : On */
2273     + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2274     + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2275     + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
2276     smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
2277     - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2278     - smtc_seqw(0x6a, 0x0c);
2279     - smtc_seqw(0x6b, 0x02);
2280     smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
2281     + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2282     smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
2283     - smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
2284     - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2285     - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2286     smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
2287     + smtc_seqw(0x6a, 0x0c);
2288     + smtc_seqw(0x6b, 0x02);
2289     break;
2290     case FB_BLANK_POWERDOWN:
2291     /* Screen On: HSync: Off, VSync : Off */
2292     + smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2293     + smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2294     + smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
2295     smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
2296     - smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2297     - smtc_seqw(0x6a, 0x0c);
2298     - smtc_seqw(0x6b, 0x02);
2299     smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
2300     + smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
2301     smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
2302     - smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
2303     - smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
2304     - smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2305     smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
2306     + smtc_seqw(0x6a, 0x0c);
2307     + smtc_seqw(0x6b, 0x02);
2308     break;
2309     default:
2310     return -EINVAL;
2311     @@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
2312    
2313     /* init SEQ register SR30 - SR75 */
2314     for (i = 0; i < SIZE_SR30_SR75; i++)
2315     - if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
2316     - (i + 0x30) != 0x6b)
2317     + if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
2318     + (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
2319     + (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
2320     + (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
2321     smtc_seqw(i + 0x30,
2322     vgamode[j].init_sr30_sr75[i]);
2323    
2324     @@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
2325     smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
2326    
2327     /* init CRTC register CR30 - CR4D */
2328     - for (i = 0; i < SIZE_CR30_CR4D; i++)
2329     + for (i = 0; i < SIZE_CR30_CR4D; i++) {
2330     + if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
2331     + /* writing CR3B-CR3F has side effects, so skip them */
2332     + continue;
2333     smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
2334     + }
2335    
2336     /* init CRTC register CR90 - CRA7 */
2337     for (i = 0; i < SIZE_CR90_CRA7; i++)
2338     @@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
2339     {
2340     sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
2341    
2342     + if (sfb->chip_id == 0x720)
2343     + /* on SM720, the framebuffer starts at the 1 MB offset */
2344     + sfb->fb->fix.smem_start += 0x00200000;
2345     +
2346     + /* XXX: is it safe for SM720 on Big-Endian? */
2347     if (sfb->fb->var.bits_per_pixel == 32)
2348     sfb->fb->fix.smem_start += big_addr;
2349    
2350     @@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
2351     outb_p(0x11, 0x3c5);
2352     }
2353    
2354     +static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
2355     +{
2356     + u8 vram;
2357     +
2358     + switch (sfb->chip_id) {
2359     + case 0x710:
2360     + case 0x712:
2361     + /*
2362     + * Assume SM712 graphics chip has 4MB VRAM.
2363     + *
2364     + * FIXME: SM712 can have 2MB VRAM, which is used on earlier
2365     + * laptops, such as IBM Thinkpad 240X. This driver would
2366     + * probably crash on those machines. If anyone gets one of
2367     + * those and is willing to help, run "git blame" and send me
2368     + * an E-mail.
2369     + */
2370     + return 0x00400000;
2371     + case 0x720:
2372     + outb_p(0x76, 0x3c4);
2373     + vram = inb_p(0x3c5) >> 6;
2374     +
2375     + if (vram == 0x00)
2376     + return 0x00800000; /* 8 MB */
2377     + else if (vram == 0x01)
2378     + return 0x01000000; /* 16 MB */
2379     + else if (vram == 0x02)
2380     + return 0x00400000; /* illegal, fallback to 4 MB */
2381     + else if (vram == 0x03)
2382     + return 0x00400000; /* 4 MB */
2383     + }
2384     + return 0; /* unknown hardware */
2385     +}
2386     +
2387     +static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
2388     +{
2389     + /* get mode parameter from smtc_scr_info */
2390     + if (smtc_scr_info.lfb_width != 0) {
2391     + sfb->fb->var.xres = smtc_scr_info.lfb_width;
2392     + sfb->fb->var.yres = smtc_scr_info.lfb_height;
2393     + sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
2394     + goto final;
2395     + }
2396     +
2397     + /*
2398     + * No parameter, default resolution is 1024x768-16.
2399     + *
2400     + * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an 800x600
2401     + * panel; also see the comments about the Thinkpad 240X above.
2402     + */
2403     + sfb->fb->var.xres = SCREEN_X_RES;
2404     + sfb->fb->var.yres = SCREEN_Y_RES_PC;
2405     + sfb->fb->var.bits_per_pixel = SCREEN_BPP;
2406     +
2407     +#ifdef CONFIG_MIPS
2408     + /*
2409     + * Loongson MIPS netbooks use 1024x600 LCD panels, which is the original
2410     + * target platform of this driver, but nearly all old x86 laptops have
2411     + * 1024x768. Lighting 768 panels using 600's timings would partially
2412     + * garble the display, so we don't want that. But it's not possible to
2413     + * distinguish them reliably.
2414     + *
2415     + * So we change the default to 768, but keep 600 as-is on MIPS.
2416     + */
2417     + sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
2418     +#endif
2419     +
2420     +final:
2421     + big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
2422     +}
2423     +
2424     static int smtcfb_pci_probe(struct pci_dev *pdev,
2425     const struct pci_device_id *ent)
2426     {
2427     struct smtcfb_info *sfb;
2428     struct fb_info *info;
2429     - u_long smem_size = 0x00800000; /* default 8MB */
2430     + u_long smem_size;
2431     int err;
2432     unsigned long mmio_base;
2433    
2434     @@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
2435    
2436     sm7xx_init_hw();
2437    
2438     - /* get mode parameter from smtc_scr_info */
2439     - if (smtc_scr_info.lfb_width != 0) {
2440     - sfb->fb->var.xres = smtc_scr_info.lfb_width;
2441     - sfb->fb->var.yres = smtc_scr_info.lfb_height;
2442     - sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
2443     - } else {
2444     - /* default resolution 1024x600 16bit mode */
2445     - sfb->fb->var.xres = SCREEN_X_RES;
2446     - sfb->fb->var.yres = SCREEN_Y_RES;
2447     - sfb->fb->var.bits_per_pixel = SCREEN_BPP;
2448     - }
2449     -
2450     - big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
2451     /* Map address and memory detection */
2452     mmio_base = pci_resource_start(pdev, 0);
2453     pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
2454    
2455     + smem_size = sm7xx_vram_probe(sfb);
2456     + dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
2457     + smem_size / 1048576);
2458     +
2459     switch (sfb->chip_id) {
2460     case 0x710:
2461     case 0x712:
2462     sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
2463     sfb->fb->fix.mmio_len = 0x00400000;
2464     - smem_size = SM712_VIDEOMEMORYSIZE;
2465     sfb->lfb = ioremap(mmio_base, mmio_addr);
2466     if (!sfb->lfb) {
2467     dev_err(&pdev->dev,
2468     @@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
2469     case 0x720:
2470     sfb->fb->fix.mmio_start = mmio_base;
2471     sfb->fb->fix.mmio_len = 0x00200000;
2472     - smem_size = SM722_VIDEOMEMORYSIZE;
2473     - sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
2474     + sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
2475     sfb->lfb = sfb->dp_regs + 0x00200000;
2476     sfb->mmio = (smtc_regbaseaddress =
2477     sfb->dp_regs + 0x000c0000);
2478     @@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
2479     goto failed_fb;
2480     }
2481    
2482     + /* probe and decide resolution */
2483     + sm7xx_resolution_probe(sfb);
2484     +
2485     /* can support 32 bpp */
2486     if (sfb->fb->var.bits_per_pixel == 15)
2487     sfb->fb->var.bits_per_pixel = 16;
2488     @@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
2489     if (err)
2490     goto failed;
2491    
2492     - smtcfb_setmode(sfb);
2493     + /*
2494     + * The screen would be temporarily garbled when sm712fb takes over
2495     + * vesafb or VGA text mode. Zero the framebuffer.
2496     + */
2497     + memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
2498    
2499     err = register_framebuffer(info);
2500     if (err < 0)
2501     diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
2502     index 070026a7e55a..2001910fd241 100644
2503     --- a/drivers/video/fbdev/udlfb.c
2504     +++ b/drivers/video/fbdev/udlfb.c
2505     @@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
2506     return 0;
2507     }
2508    
2509     -static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
2510     - int width, int height, char *data)
2511     +static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
2512     {
2513     int i, ret;
2514     char *cmd;
2515     @@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
2516    
2517     start_cycles = get_cycles();
2518    
2519     + mutex_lock(&dlfb->render_mutex);
2520     +
2521     aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
2522     width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
2523     x = aligned_x;
2524    
2525     if ((width <= 0) ||
2526     (x + width > dlfb->info->var.xres) ||
2527     - (y + height > dlfb->info->var.yres))
2528     - return -EINVAL;
2529     + (y + height > dlfb->info->var.yres)) {
2530     + ret = -EINVAL;
2531     + goto unlock_ret;
2532     + }
2533    
2534     - if (!atomic_read(&dlfb->usb_active))
2535     - return 0;
2536     + if (!atomic_read(&dlfb->usb_active)) {
2537     + ret = 0;
2538     + goto unlock_ret;
2539     + }
2540    
2541     urb = dlfb_get_urb(dlfb);
2542     - if (!urb)
2543     - return 0;
2544     + if (!urb) {
2545     + ret = 0;
2546     + goto unlock_ret;
2547     + }
2548     cmd = urb->transfer_buffer;
2549    
2550     for (i = y; i < y + height ; i++) {
2551     @@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
2552     *cmd++ = 0xAF;
2553     /* Send partial buffer remaining before exiting */
2554     len = cmd - (char *) urb->transfer_buffer;
2555     - ret = dlfb_submit_urb(dlfb, urb, len);
2556     + dlfb_submit_urb(dlfb, urb, len);
2557     bytes_sent += len;
2558     } else
2559     dlfb_urb_completion(urb);
2560     @@ -655,7 +662,55 @@ error:
2561     >> 10)), /* Kcycles */
2562     &dlfb->cpu_kcycles_used);
2563    
2564     - return 0;
2565     + ret = 0;
2566     +
2567     +unlock_ret:
2568     + mutex_unlock(&dlfb->render_mutex);
2569     + return ret;
2570     +}
2571     +
2572     +static void dlfb_init_damage(struct dlfb_data *dlfb)
2573     +{
2574     + dlfb->damage_x = INT_MAX;
2575     + dlfb->damage_x2 = 0;
2576     + dlfb->damage_y = INT_MAX;
2577     + dlfb->damage_y2 = 0;
2578     +}
2579     +
2580     +static void dlfb_damage_work(struct work_struct *w)
2581     +{
2582     + struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
2583     + int x, x2, y, y2;
2584     +
2585     + spin_lock_irq(&dlfb->damage_lock);
2586     + x = dlfb->damage_x;
2587     + x2 = dlfb->damage_x2;
2588     + y = dlfb->damage_y;
2589     + y2 = dlfb->damage_y2;
2590     + dlfb_init_damage(dlfb);
2591     + spin_unlock_irq(&dlfb->damage_lock);
2592     +
2593     + if (x < x2 && y < y2)
2594     + dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
2595     +}
2596     +
2597     +static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
2598     +{
2599     + unsigned long flags;
2600     + int x2 = x + width;
2601     + int y2 = y + height;
2602     +
2603     + if (x >= x2 || y >= y2)
2604     + return;
2605     +
2606     + spin_lock_irqsave(&dlfb->damage_lock, flags);
2607     + dlfb->damage_x = min(x, dlfb->damage_x);
2608     + dlfb->damage_x2 = max(x2, dlfb->damage_x2);
2609     + dlfb->damage_y = min(y, dlfb->damage_y);
2610     + dlfb->damage_y2 = max(y2, dlfb->damage_y2);
2611     + spin_unlock_irqrestore(&dlfb->damage_lock, flags);
2612     +
2613     + schedule_work(&dlfb->damage_work);
2614     }
2615    
2616     /*
2617     @@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
2618     (u32)info->var.yres);
2619    
2620     dlfb_handle_damage(dlfb, 0, start, info->var.xres,
2621     - lines, info->screen_base);
2622     + lines);
2623     }
2624    
2625     return result;
2626     @@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
2627    
2628     sys_copyarea(info, area);
2629    
2630     - dlfb_handle_damage(dlfb, area->dx, area->dy,
2631     - area->width, area->height, info->screen_base);
2632     + dlfb_offload_damage(dlfb, area->dx, area->dy,
2633     + area->width, area->height);
2634     }
2635    
2636     static void dlfb_ops_imageblit(struct fb_info *info,
2637     @@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
2638    
2639     sys_imageblit(info, image);
2640    
2641     - dlfb_handle_damage(dlfb, image->dx, image->dy,
2642     - image->width, image->height, info->screen_base);
2643     + dlfb_offload_damage(dlfb, image->dx, image->dy,
2644     + image->width, image->height);
2645     }
2646    
2647     static void dlfb_ops_fillrect(struct fb_info *info,
2648     @@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
2649    
2650     sys_fillrect(info, rect);
2651    
2652     - dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
2653     - rect->height, info->screen_base);
2654     + dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
2655     + rect->height);
2656     }
2657    
2658     /*
2659     @@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
2660     int bytes_identical = 0;
2661     int bytes_rendered = 0;
2662    
2663     + mutex_lock(&dlfb->render_mutex);
2664     +
2665     if (!fb_defio)
2666     - return;
2667     + goto unlock_ret;
2668    
2669     if (!atomic_read(&dlfb->usb_active))
2670     - return;
2671     + goto unlock_ret;
2672    
2673     start_cycles = get_cycles();
2674    
2675     urb = dlfb_get_urb(dlfb);
2676     if (!urb)
2677     - return;
2678     + goto unlock_ret;
2679    
2680     cmd = urb->transfer_buffer;
2681    
2682     @@ -782,6 +839,8 @@ error:
2683     atomic_add(((unsigned int) ((end_cycles - start_cycles)
2684     >> 10)), /* Kcycles */
2685     &dlfb->cpu_kcycles_used);
2686     +unlock_ret:
2687     + mutex_unlock(&dlfb->render_mutex);
2688     }
2689    
2690     static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
2691     @@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
2692     if (area.y > info->var.yres)
2693     area.y = info->var.yres;
2694    
2695     - dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
2696     - info->screen_base);
2697     + dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
2698     }
2699    
2700     return 0;
2701     @@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
2702     {
2703     struct dlfb_data *dlfb = info->par;
2704    
2705     + cancel_work_sync(&dlfb->damage_work);
2706     +
2707     + mutex_destroy(&dlfb->render_mutex);
2708     +
2709     if (info->cmap.len != 0)
2710     fb_dealloc_cmap(&info->cmap);
2711     if (info->monspecs.modedb)
2712     @@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
2713     pix_framebuffer[i] = 0x37e6;
2714     }
2715    
2716     - dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
2717     - info->screen_base);
2718     + dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
2719    
2720     return 0;
2721     }
2722     @@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
2723     dlfb->ops = dlfb_ops;
2724     info->fbops = &dlfb->ops;
2725    
2726     + mutex_init(&dlfb->render_mutex);
2727     + dlfb_init_damage(dlfb);
2728     + spin_lock_init(&dlfb->damage_lock);
2729     + INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
2730     +
2731     INIT_LIST_HEAD(&info->modelist);
2732    
2733     if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
2734     diff --git a/fs/ceph/super.c b/fs/ceph/super.c
2735     index eab1359d0553..c5cf46e43f2e 100644
2736     --- a/fs/ceph/super.c
2737     +++ b/fs/ceph/super.c
2738     @@ -819,6 +819,12 @@ static void ceph_umount_begin(struct super_block *sb)
2739     return;
2740     }
2741    
2742     +static int ceph_remount(struct super_block *sb, int *flags, char *data)
2743     +{
2744     + sync_filesystem(sb);
2745     + return 0;
2746     +}
2747     +
2748     static const struct super_operations ceph_super_ops = {
2749     .alloc_inode = ceph_alloc_inode,
2750     .destroy_inode = ceph_destroy_inode,
2751     @@ -826,6 +832,7 @@ static const struct super_operations ceph_super_ops = {
2752     .drop_inode = ceph_drop_inode,
2753     .sync_fs = ceph_sync_fs,
2754     .put_super = ceph_put_super,
2755     + .remount_fs = ceph_remount,
2756     .show_options = ceph_show_options,
2757     .statfs = ceph_statfs,
2758     .umount_begin = ceph_umount_begin,
2759     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2760     index 2001184afe70..0ccf8f9b63a2 100644
2761     --- a/fs/cifs/smb2ops.c
2762     +++ b/fs/cifs/smb2ops.c
2763     @@ -2348,26 +2348,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
2764     unsigned int epoch, bool *purge_cache)
2765     {
2766     char message[5] = {0};
2767     + unsigned int new_oplock = 0;
2768    
2769     oplock &= 0xFF;
2770     if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
2771     return;
2772    
2773     - cinode->oplock = 0;
2774     if (oplock & SMB2_LEASE_READ_CACHING_HE) {
2775     - cinode->oplock |= CIFS_CACHE_READ_FLG;
2776     + new_oplock |= CIFS_CACHE_READ_FLG;
2777     strcat(message, "R");
2778     }
2779     if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
2780     - cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
2781     + new_oplock |= CIFS_CACHE_HANDLE_FLG;
2782     strcat(message, "H");
2783     }
2784     if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
2785     - cinode->oplock |= CIFS_CACHE_WRITE_FLG;
2786     + new_oplock |= CIFS_CACHE_WRITE_FLG;
2787     strcat(message, "W");
2788     }
2789     - if (!cinode->oplock)
2790     - strcat(message, "None");
2791     + if (!new_oplock)
2792     + strncpy(message, "None", sizeof(message));
2793     +
2794     + cinode->oplock = new_oplock;
2795     cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
2796     &cinode->vfs_inode);
2797     }
2798     diff --git a/fs/dcache.c b/fs/dcache.c
2799     index cb515f183482..6e0022326afe 100644
2800     --- a/fs/dcache.c
2801     +++ b/fs/dcache.c
2802     @@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry)
2803     }
2804     }
2805     /* if dentry was never visible to RCU, immediate free is OK */
2806     - if (!(dentry->d_flags & DCACHE_RCUACCESS))
2807     + if (dentry->d_flags & DCACHE_NORCU)
2808     __d_free(&dentry->d_u.d_rcu);
2809     else
2810     call_rcu(&dentry->d_u.d_rcu, __d_free);
2811     @@ -1694,7 +1694,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
2812     struct dentry *dentry = __d_alloc(parent->d_sb, name);
2813     if (!dentry)
2814     return NULL;
2815     - dentry->d_flags |= DCACHE_RCUACCESS;
2816     spin_lock(&parent->d_lock);
2817     /*
2818     * don't need child lock because it is not subject
2819     @@ -1719,7 +1718,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
2820     {
2821     struct dentry *dentry = d_alloc_anon(parent->d_sb);
2822     if (dentry) {
2823     - dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
2824     + dentry->d_flags |= DCACHE_DENTRY_CURSOR;
2825     dentry->d_parent = dget(parent);
2826     }
2827     return dentry;
2828     @@ -1732,10 +1731,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
2829     *
2830     * For a filesystem that just pins its dentries in memory and never
2831     * performs lookups at all, return an unhashed IS_ROOT dentry.
2832     + * This is used for pipes, sockets et.al. - the stuff that should
2833     + * never be anyone's children or parents. Unlike all other
2834     + * dentries, these will not have RCU delay between dropping the
2835     + * last reference and freeing them.
2836     */
2837     struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
2838     {
2839     - return __d_alloc(sb, name);
2840     + struct dentry *dentry = __d_alloc(sb, name);
2841     + if (likely(dentry))
2842     + dentry->d_flags |= DCACHE_NORCU;
2843     + return dentry;
2844     }
2845     EXPORT_SYMBOL(d_alloc_pseudo);
2846    
2847     @@ -1899,12 +1905,10 @@ struct dentry *d_make_root(struct inode *root_inode)
2848    
2849     if (root_inode) {
2850     res = d_alloc_anon(root_inode->i_sb);
2851     - if (res) {
2852     - res->d_flags |= DCACHE_RCUACCESS;
2853     + if (res)
2854     d_instantiate(res, root_inode);
2855     - } else {
2856     + else
2857     iput(root_inode);
2858     - }
2859     }
2860     return res;
2861     }
2862     @@ -2769,9 +2773,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
2863     copy_name(dentry, target);
2864     target->d_hash.pprev = NULL;
2865     dentry->d_parent->d_lockref.count++;
2866     - if (dentry == old_parent)
2867     - dentry->d_flags |= DCACHE_RCUACCESS;
2868     - else
2869     + if (dentry != old_parent) /* wasn't IS_ROOT */
2870     WARN_ON(!--old_parent->d_lockref.count);
2871     } else {
2872     target->d_parent = old_parent;
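The dcache hunks above invert the sense of the flag: rather than tagging every dentry that has ever been RCU-visible, RCU-delayed freeing becomes the default and a single DCACHE_NORCU bit, set only in d_alloc_pseudo(), opts out. A toy sketch of that default-inversion idea is below; the names are illustrative and are not the dcache API.

#include <stdio.h>

#define FLAG_NORCU 0x1u /* rare opt-out, set at exactly one allocation site */

struct entry { unsigned int flags; };

static void free_entry(const struct entry *e)
{
	if (e->flags & FLAG_NORCU)
		puts("free immediately: never visible to lockless walkers");
	else
		puts("defer the free past a grace period (the safe default)");
}

int main(void)
{
	struct entry regular = { .flags = 0 };          /* default path */
	struct entry pseudo  = { .flags = FLAG_NORCU }; /* e.g. pipes, sockets */

	free_entry(&regular);
	free_entry(&pseudo);
	return 0;
}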
2873     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
2874     index bd500c3b7858..59e8bb72dc14 100644
2875     --- a/fs/fuse/file.c
2876     +++ b/fs/fuse/file.c
2877     @@ -179,7 +179,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
2878     file->f_op = &fuse_direct_io_file_operations;
2879     if (!(ff->open_flags & FOPEN_KEEP_CACHE))
2880     invalidate_inode_pages2(inode->i_mapping);
2881     - if (ff->open_flags & FOPEN_NONSEEKABLE)
2882     + if (ff->open_flags & FOPEN_STREAM)
2883     + stream_open(inode, file);
2884     + else if (ff->open_flags & FOPEN_NONSEEKABLE)
2885     nonseekable_open(inode, file);
2886     if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
2887     struct fuse_inode *fi = get_fuse_inode(inode);
2888     @@ -1526,7 +1528,7 @@ __acquires(fc->lock)
2889     {
2890     struct fuse_conn *fc = get_fuse_conn(inode);
2891     struct fuse_inode *fi = get_fuse_inode(inode);
2892     - size_t crop = i_size_read(inode);
2893     + loff_t crop = i_size_read(inode);
2894     struct fuse_req *req;
2895    
2896     while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
2897     @@ -2975,6 +2977,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2898     }
2899     }
2900    
2901     + if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2902     + offset + length > i_size_read(inode)) {
2903     + err = inode_newsize_ok(inode, offset + length);
2904     + if (err)
2905     + return err;
2906     + }
2907     +
2908     if (!(mode & FALLOC_FL_KEEP_SIZE))
2909     set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2910    
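One hunk above changes 'crop' in the fuse writeback path from size_t to loff_t. On 32-bit builds size_t is 32 bits, so reading an inode size into it would silently truncate anything at or above 4 GiB, while loff_t is always 64 bits. A small standalone illustration of that truncation, in plain C with nothing fuse-specific:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t i_size = 5LL * 1024 * 1024 * 1024;   /* a 5 GiB file size */
	uint32_t as_32bit_size_t = (uint32_t)i_size; /* what a 32-bit size_t keeps */
	int64_t as_loff_t = i_size;                  /* loff_t keeps the full value */

	printf("truncated crop: %u\n", as_32bit_size_t);        /* 1 GiB, wrong */
	printf("correct crop:   %lld\n", (long long)as_loff_t); /* 5 GiB */
	return 0;
}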
2911     diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
2912     index d175724ff566..2478a69da0f0 100644
2913     --- a/fs/nfs/filelayout/filelayout.c
2914     +++ b/fs/nfs/filelayout/filelayout.c
2915     @@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino,
2916     status = filelayout_check_deviceid(lo, fl, gfp_flags);
2917     if (status) {
2918     pnfs_put_lseg(lseg);
2919     - lseg = ERR_PTR(status);
2920     + lseg = NULL;
2921     }
2922     out:
2923     return lseg;
2924     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2925     index d2f645d34eb1..3ba2087469ac 100644
2926     --- a/fs/nfs/nfs4state.c
2927     +++ b/fs/nfs/nfs4state.c
2928     @@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
2929     /* Sustain the lease, even if it's empty. If the clientid4
2930     * goes stale it's of no use for trunking discovery. */
2931     nfs4_schedule_state_renewal(*result);
2932     +
2933     + /* If the client state need to recover, do it. */
2934     + if (clp->cl_state)
2935     + nfs4_schedule_state_manager(clp);
2936     }
2937     out:
2938     return status;
2939     diff --git a/fs/nsfs.c b/fs/nsfs.c
2940     index 60702d677bd4..30d150a4f0c6 100644
2941     --- a/fs/nsfs.c
2942     +++ b/fs/nsfs.c
2943     @@ -85,13 +85,12 @@ slow:
2944     inode->i_fop = &ns_file_operations;
2945     inode->i_private = ns;
2946    
2947     - dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
2948     + dentry = d_alloc_anon(mnt->mnt_sb);
2949     if (!dentry) {
2950     iput(inode);
2951     return ERR_PTR(-ENOMEM);
2952     }
2953     d_instantiate(dentry, inode);
2954     - dentry->d_flags |= DCACHE_RCUACCESS;
2955     dentry->d_fsdata = (void *)ns->ops;
2956     d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
2957     if (d) {
2958     diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
2959     index 75eeee08d848..ffc73600216b 100644
2960     --- a/fs/overlayfs/copy_up.c
2961     +++ b/fs/overlayfs/copy_up.c
2962     @@ -878,14 +878,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
2963     return true;
2964     }
2965    
2966     -int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
2967     +int ovl_maybe_copy_up(struct dentry *dentry, int flags)
2968     {
2969     int err = 0;
2970    
2971     - if (ovl_open_need_copy_up(dentry, file_flags)) {
2972     + if (ovl_open_need_copy_up(dentry, flags)) {
2973     err = ovl_want_write(dentry);
2974     if (!err) {
2975     - err = ovl_copy_up_flags(dentry, file_flags);
2976     + err = ovl_copy_up_flags(dentry, flags);
2977     ovl_drop_write(dentry);
2978     }
2979     }
2980     diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
2981     index 986313da0c88..0c810f20f778 100644
2982     --- a/fs/overlayfs/file.c
2983     +++ b/fs/overlayfs/file.c
2984     @@ -116,11 +116,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
2985    
2986     static int ovl_open(struct inode *inode, struct file *file)
2987     {
2988     - struct dentry *dentry = file_dentry(file);
2989     struct file *realfile;
2990     int err;
2991    
2992     - err = ovl_open_maybe_copy_up(dentry, file->f_flags);
2993     + err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
2994     if (err)
2995     return err;
2996    
2997     @@ -390,7 +389,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2998     if (ret)
2999     return ret;
3000    
3001     - ret = ovl_copy_up_with_data(file_dentry(file));
3002     + ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
3003     if (!ret) {
3004     ret = ovl_real_ioctl(file, cmd, arg);
3005    
3006     diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
3007     index d9c16ceebfe7..80fb66426760 100644
3008     --- a/fs/overlayfs/overlayfs.h
3009     +++ b/fs/overlayfs/overlayfs.h
3010     @@ -411,7 +411,7 @@ extern const struct file_operations ovl_file_operations;
3011     int ovl_copy_up(struct dentry *dentry);
3012     int ovl_copy_up_with_data(struct dentry *dentry);
3013     int ovl_copy_up_flags(struct dentry *dentry, int flags);
3014     -int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
3015     +int ovl_maybe_copy_up(struct dentry *dentry, int flags);
3016     int ovl_copy_xattr(struct dentry *old, struct dentry *new);
3017     int ovl_set_attr(struct dentry *upper, struct kstat *stat);
3018     struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
3019     diff --git a/fs/proc/base.c b/fs/proc/base.c
3020     index 81d77b15b347..f999e8bd3771 100644
3021     --- a/fs/proc/base.c
3022     +++ b/fs/proc/base.c
3023     @@ -2542,6 +2542,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
3024     rcu_read_unlock();
3025     return -EACCES;
3026     }
3027     + /* Prevent changes to overridden credentials. */
3028     + if (current_cred() != current_real_cred()) {
3029     + rcu_read_unlock();
3030     + return -EBUSY;
3031     + }
3032     rcu_read_unlock();
3033    
3034     if (count > PAGE_SIZE)
3035     diff --git a/fs/ufs/util.h b/fs/ufs/util.h
3036     index 1fd3011ea623..7fd4802222b8 100644
3037     --- a/fs/ufs/util.h
3038     +++ b/fs/ufs/util.h
3039     @@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
3040     case UFS_UID_44BSD:
3041     return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
3042     case UFS_UID_EFT:
3043     - if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
3044     + if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
3045     return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
3046     /* Fall through */
3047     default:
3048     diff --git a/include/linux/bpf.h b/include/linux/bpf.h
3049     index 523481a3471b..16f6beef5cad 100644
3050     --- a/include/linux/bpf.h
3051     +++ b/include/linux/bpf.h
3052     @@ -34,6 +34,7 @@ struct bpf_map_ops {
3053     void (*map_free)(struct bpf_map *map);
3054     int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
3055     void (*map_release_uref)(struct bpf_map *map);
3056     + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
3057    
3058     /* funcs callable from userspace and from eBPF programs */
3059     void *(*map_lookup_elem)(struct bpf_map *map, void *key);
3060     @@ -400,7 +401,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
3061     } \
3062     _out: \
3063     rcu_read_unlock(); \
3064     - preempt_enable_no_resched(); \
3065     + preempt_enable(); \
3066     _ret; \
3067     })
3068    
3069     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
3070     index ef4b70f64f33..0880baefd85f 100644
3071     --- a/include/linux/dcache.h
3072     +++ b/include/linux/dcache.h
3073     @@ -175,7 +175,6 @@ struct dentry_operations {
3074     * typically using d_splice_alias. */
3075    
3076     #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
3077     -#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
3078    
3079     #define DCACHE_CANT_MOUNT 0x00000100
3080     #define DCACHE_GENOCIDE 0x00000200
3081     @@ -216,6 +215,7 @@ struct dentry_operations {
3082    
3083     #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
3084     #define DCACHE_DENTRY_CURSOR 0x20000000
3085     +#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
3086    
3087     extern seqlock_t rename_lock;
3088    
3089     diff --git a/include/linux/of.h b/include/linux/of.h
3090     index 40e58b0e9cf4..f2c80cc4641d 100644
3091     --- a/include/linux/of.h
3092     +++ b/include/linux/of.h
3093     @@ -236,8 +236,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
3094     static inline u64 of_read_number(const __be32 *cell, int size)
3095     {
3096     u64 r = 0;
3097     - while (size--)
3098     - r = (r << 32) | be32_to_cpu(*(cell++));
3099     + for (; size--; cell++)
3100     + r = (r << 32) | be32_to_cpu(*cell);
3101     return r;
3102     }
3103    
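The of_read_number() rewrite above moves the pointer increment out of the be32_to_cpu() argument and into the for-loop header. A byte-swap helper implemented as a macro can expand its argument more than once, so passing '*(cell++)' risks unsequenced increments, which is likely what this change avoids. A compilable sketch of the hazard, using a made-up DOUBLE() macro as a stand-in:

#include <stdio.h>

#define DOUBLE(x) ((x) + (x)) /* expands its argument twice */

int main(void)
{
	int vals[] = { 1, 2, 3 };
	const int *cell = vals;
	int sum = 0;
	int n;

	/* DOUBLE(*cell++) would increment 'cell' twice per use, in an
	 * unspecified order; advancing the pointer in the loop header keeps
	 * exactly one increment per pass. */
	for (n = 3; n--; cell++)
		sum += DOUBLE(*cell);

	printf("%d\n", sum); /* 12 */
	return 0;
}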
3104     diff --git a/include/linux/pci.h b/include/linux/pci.h
3105     index 6925828f9f25..b1f297f4b7b0 100644
3106     --- a/include/linux/pci.h
3107     +++ b/include/linux/pci.h
3108     @@ -346,6 +346,8 @@ struct pci_dev {
3109     unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
3110     controlled exclusively by
3111     user sysfs */
3112     + unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
3113     + bit manually */
3114     unsigned int d3_delay; /* D3->D0 transition time in ms */
3115     unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
3116    
3117     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3118     index 820903ceac4f..28baccb1efd5 100644
3119     --- a/include/linux/skbuff.h
3120     +++ b/include/linux/skbuff.h
3121     @@ -1333,10 +1333,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
3122     struct ubuf_info *uarg = skb_zcopy(skb);
3123    
3124     if (uarg) {
3125     - if (uarg->callback == sock_zerocopy_callback) {
3126     + if (skb_zcopy_is_nouarg(skb)) {
3127     + /* no notification callback */
3128     + } else if (uarg->callback == sock_zerocopy_callback) {
3129     uarg->zerocopy = uarg->zerocopy && zerocopy;
3130     sock_zerocopy_put(uarg);
3131     - } else if (!skb_zcopy_is_nouarg(skb)) {
3132     + } else {
3133     uarg->callback(uarg, zerocopy);
3134     }
3135    
3136     @@ -2587,7 +2589,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
3137     {
3138     if (likely(!skb_zcopy(skb)))
3139     return 0;
3140     - if (skb_uarg(skb)->callback == sock_zerocopy_callback)
3141     + if (!skb_zcopy_is_nouarg(skb) &&
3142     + skb_uarg(skb)->callback == sock_zerocopy_callback)
3143     return 0;
3144     return skb_copy_ubufs(skb, gfp_mask);
3145     }
3146     diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
3147     index 2d31e22babd8..983f7a1a3f1d 100644
3148     --- a/include/net/ip6_fib.h
3149     +++ b/include/net/ip6_fib.h
3150     @@ -171,7 +171,8 @@ struct fib6_info {
3151     dst_nocount:1,
3152     dst_nopolicy:1,
3153     dst_host:1,
3154     - unused:3;
3155     + fib6_destroying:1,
3156     + unused:2;
3157    
3158     struct fib6_nh fib6_nh;
3159     struct rcu_head rcu;
3160     diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3161     index 5e3daf53b3d1..4ddd2b13ac8d 100644
3162     --- a/include/net/xfrm.h
3163     +++ b/include/net/xfrm.h
3164     @@ -295,7 +295,8 @@ struct xfrm_replay {
3165     };
3166    
3167     struct xfrm_if_cb {
3168     - struct xfrm_if *(*decode_session)(struct sk_buff *skb);
3169     + struct xfrm_if *(*decode_session)(struct sk_buff *skb,
3170     + unsigned short family);
3171     };
3172    
3173     void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
3174     @@ -1430,6 +1431,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
3175     return atomic_read(&x->tunnel_users);
3176     }
3177    
3178     +static inline bool xfrm_id_proto_valid(u8 proto)
3179     +{
3180     + switch (proto) {
3181     + case IPPROTO_AH:
3182     + case IPPROTO_ESP:
3183     + case IPPROTO_COMP:
3184     +#if IS_ENABLED(CONFIG_IPV6)
3185     + case IPPROTO_ROUTING:
3186     + case IPPROTO_DSTOPTS:
3187     +#endif
3188     + return true;
3189     + default:
3190     + return false;
3191     + }
3192     +}
3193     +
3194     +/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
3195     static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
3196     {
3197     return (!userproto || proto == userproto ||
3198     diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
3199     index 92fa24c24c92..2170e58a2a97 100644
3200     --- a/include/uapi/linux/fuse.h
3201     +++ b/include/uapi/linux/fuse.h
3202     @@ -219,10 +219,12 @@ struct fuse_file_lock {
3203     * FOPEN_DIRECT_IO: bypass page cache for this open file
3204     * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
3205     * FOPEN_NONSEEKABLE: the file is not seekable
3206     + * FOPEN_STREAM: the file is stream-like (no file position at all)
3207     */
3208     #define FOPEN_DIRECT_IO (1 << 0)
3209     #define FOPEN_KEEP_CACHE (1 << 1)
3210     #define FOPEN_NONSEEKABLE (1 << 2)
3211     +#define FOPEN_STREAM (1 << 4)
3212    
3213     /**
3214     * INIT request/reply flags
3215     diff --git a/include/video/udlfb.h b/include/video/udlfb.h
3216     index 7d09e54ae54e..58fb5732831a 100644
3217     --- a/include/video/udlfb.h
3218     +++ b/include/video/udlfb.h
3219     @@ -48,6 +48,13 @@ struct dlfb_data {
3220     int base8;
3221     u32 pseudo_palette[256];
3222     int blank_mode; /*one of FB_BLANK_ */
3223     + struct mutex render_mutex;
3224     + int damage_x;
3225     + int damage_y;
3226     + int damage_x2;
3227     + int damage_y2;
3228     + spinlock_t damage_lock;
3229     + struct work_struct damage_work;
3230     struct fb_ops ops;
3231     /* blit-only rendering path metrics, exposed through sysfs */
3232     atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
3233     diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
3234     index cebadd6af4d9..6fe72792312d 100644
3235     --- a/kernel/bpf/hashtab.c
3236     +++ b/kernel/bpf/hashtab.c
3237     @@ -518,18 +518,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
3238     return insn - insn_buf;
3239     }
3240    
3241     -static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
3242     +static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
3243     + void *key, const bool mark)
3244     {
3245     struct htab_elem *l = __htab_map_lookup_elem(map, key);
3246    
3247     if (l) {
3248     - bpf_lru_node_set_ref(&l->lru_node);
3249     + if (mark)
3250     + bpf_lru_node_set_ref(&l->lru_node);
3251     return l->key + round_up(map->key_size, 8);
3252     }
3253    
3254     return NULL;
3255     }
3256    
3257     +static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
3258     +{
3259     + return __htab_lru_map_lookup_elem(map, key, true);
3260     +}
3261     +
3262     +static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
3263     +{
3264     + return __htab_lru_map_lookup_elem(map, key, false);
3265     +}
3266     +
3267     static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
3268     struct bpf_insn *insn_buf)
3269     {
3270     @@ -1206,6 +1218,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
3271     .map_free = htab_map_free,
3272     .map_get_next_key = htab_map_get_next_key,
3273     .map_lookup_elem = htab_lru_map_lookup_elem,
3274     + .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
3275     .map_update_elem = htab_lru_map_update_elem,
3276     .map_delete_elem = htab_lru_map_delete_elem,
3277     .map_gen_lookup = htab_lru_map_gen_lookup,
3278     @@ -1237,7 +1250,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
3279    
3280     int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
3281     {
3282     - struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
3283     struct htab_elem *l;
3284     void __percpu *pptr;
3285     int ret = -ENOENT;
3286     @@ -1253,8 +1265,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
3287     l = __htab_map_lookup_elem(map, key);
3288     if (!l)
3289     goto out;
3290     - if (htab_is_lru(htab))
3291     - bpf_lru_node_set_ref(&l->lru_node);
3292     + /* We do not mark LRU map element here in order to not mess up
3293     + * eviction heuristics when user space does a map walk.
3294     + */
3295     pptr = htab_elem_get_ptr(l, map->key_size);
3296     for_each_possible_cpu(cpu) {
3297     bpf_long_memcpy(value + off,
3298     diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
3299     index 4a8f390a2b82..dc9d7ac8228d 100644
3300     --- a/kernel/bpf/inode.c
3301     +++ b/kernel/bpf/inode.c
3302     @@ -518,7 +518,7 @@ out:
3303     static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
3304     {
3305     struct bpf_prog *prog;
3306     - int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
3307     + int ret = inode_permission(inode, MAY_READ);
3308     if (ret)
3309     return ERR_PTR(ret);
3310    
3311     diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3312     index cc40b8be1171..ede82382dd32 100644
3313     --- a/kernel/bpf/syscall.c
3314     +++ b/kernel/bpf/syscall.c
3315     @@ -721,7 +721,10 @@ static int map_lookup_elem(union bpf_attr *attr)
3316     err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
3317     } else {
3318     rcu_read_lock();
3319     - ptr = map->ops->map_lookup_elem(map, key);
3320     + if (map->ops->map_lookup_elem_sys_only)
3321     + ptr = map->ops->map_lookup_elem_sys_only(map, key);
3322     + else
3323     + ptr = map->ops->map_lookup_elem(map, key);
3324     if (ptr)
3325     memcpy(value, ptr, value_size);
3326     rcu_read_unlock();
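The map_lookup_elem() change above makes the syscall path prefer an optional map_lookup_elem_sys_only() callback when the map provides one (the LRU hash map uses it to skip reference marking so userspace walks do not disturb eviction), falling back to the normal lookup otherwise. A small sketch of that optional-callback dispatch, with invented types rather than the real bpf structures:

#include <stddef.h>
#include <stdio.h>

struct map;

struct map_ops {
	void *(*lookup_elem)(struct map *map, void *key);
	void *(*lookup_elem_sys_only)(struct map *map, void *key); /* may be NULL */
};

struct map { const struct map_ops *ops; };

static void *syscall_lookup(struct map *map, void *key)
{
	if (map->ops->lookup_elem_sys_only)
		return map->ops->lookup_elem_sys_only(map, key); /* no LRU marking */
	return map->ops->lookup_elem(map, key);
}

static void *plain_lookup(struct map *map, void *key)
{
	(void)map; (void)key;
	return "value";
}

int main(void)
{
	static const struct map_ops ops = { .lookup_elem = plain_lookup };
	struct map m = { .ops = &ops };

	printf("%s\n", (char *)syscall_lookup(&m, NULL));
	return 0;
}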
3327     diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
3328     index 217f81ecae17..4e3625109b28 100644
3329     --- a/kernel/sched/cpufreq_schedutil.c
3330     +++ b/kernel/sched/cpufreq_schedutil.c
3331     @@ -751,6 +751,7 @@ out:
3332     return 0;
3333    
3334     fail:
3335     + kobject_put(&tunables->attr_set.kobj);
3336     policy->governor_data = NULL;
3337     sugov_tunables_free(tunables);
3338    
3339     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3340     index f94be0c2827b..7345f5f8f3fe 100644
3341     --- a/kernel/trace/trace_events.c
3342     +++ b/kernel/trace/trace_events.c
3343     @@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
3344     char buf[32];
3345     int len;
3346    
3347     - if (*ppos)
3348     - return 0;
3349     -
3350     if (unlikely(!id))
3351     return -ENODEV;
3352    
3353     diff --git a/lib/Makefile b/lib/Makefile
3354     index 423876446810..0ab808318202 100644
3355     --- a/lib/Makefile
3356     +++ b/lib/Makefile
3357     @@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
3358     KCOV_INSTRUMENT_debugobjects.o := n
3359     KCOV_INSTRUMENT_dynamic_debug.o := n
3360    
3361     +# Early boot use of cmdline, don't instrument it
3362     +ifdef CONFIG_AMD_MEM_ENCRYPT
3363     +KASAN_SANITIZE_string.o := n
3364     +
3365     +ifdef CONFIG_FUNCTION_TRACER
3366     +CFLAGS_REMOVE_string.o = -pg
3367     +endif
3368     +
3369     +CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
3370     +endif
3371     +
3372     lib-y := ctype.o string.o vsprintf.o cmdline.o \
3373     rbtree.o radix-tree.o timerqueue.o\
3374     idr.o int_sqrt.o extable.o \
3375     diff --git a/net/core/dev.c b/net/core/dev.c
3376     index 3bcec116a5f2..13a82744a00a 100644
3377     --- a/net/core/dev.c
3378     +++ b/net/core/dev.c
3379     @@ -8716,7 +8716,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
3380    
3381     refcnt = netdev_refcnt_read(dev);
3382    
3383     - if (time_after(jiffies, warning_time + 10 * HZ)) {
3384     + if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
3385     pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
3386     dev->name, refcnt);
3387     warning_time = jiffies;
3388     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3389     index ebde98b565e9..3932eed379a4 100644
3390     --- a/net/core/rtnetlink.c
3391     +++ b/net/core/rtnetlink.c
3392     @@ -1496,14 +1496,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
3393     return ret;
3394     }
3395    
3396     -static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
3397     +static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
3398     + bool force)
3399     {
3400     int ifindex = dev_get_iflink(dev);
3401    
3402     - if (dev->ifindex == ifindex)
3403     - return 0;
3404     + if (force || dev->ifindex != ifindex)
3405     + return nla_put_u32(skb, IFLA_LINK, ifindex);
3406    
3407     - return nla_put_u32(skb, IFLA_LINK, ifindex);
3408     + return 0;
3409     }
3410    
3411     static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
3412     @@ -1520,6 +1521,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
3413     const struct net_device *dev,
3414     struct net *src_net)
3415     {
3416     + bool put_iflink = false;
3417     +
3418     if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
3419     struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
3420    
3421     @@ -1528,10 +1531,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
3422    
3423     if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
3424     return -EMSGSIZE;
3425     +
3426     + put_iflink = true;
3427     }
3428     }
3429    
3430     - return 0;
3431     + return nla_put_iflink(skb, dev, put_iflink);
3432     }
3433    
3434     static int rtnl_fill_link_af(struct sk_buff *skb,
3435     @@ -1617,7 +1622,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
3436     #ifdef CONFIG_RPS
3437     nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
3438     #endif
3439     - nla_put_iflink(skb, dev) ||
3440     put_master_ifindex(skb, dev) ||
3441     nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
3442     (dev->qdisc &&
3443     diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
3444     index 12a43a5369a5..114f9def1ec5 100644
3445     --- a/net/ipv4/esp4.c
3446     +++ b/net/ipv4/esp4.c
3447     @@ -223,7 +223,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
3448     tail[plen - 1] = proto;
3449     }
3450    
3451     -static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
3452     +static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
3453     {
3454     int encap_type;
3455     struct udphdr *uh;
3456     @@ -231,6 +231,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
3457     __be16 sport, dport;
3458     struct xfrm_encap_tmpl *encap = x->encap;
3459     struct ip_esp_hdr *esph = esp->esph;
3460     + unsigned int len;
3461    
3462     spin_lock_bh(&x->lock);
3463     sport = encap->encap_sport;
3464     @@ -238,11 +239,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
3465     encap_type = encap->encap_type;
3466     spin_unlock_bh(&x->lock);
3467    
3468     + len = skb->len + esp->tailen - skb_transport_offset(skb);
3469     + if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
3470     + return -EMSGSIZE;
3471     +
3472     uh = (struct udphdr *)esph;
3473     uh->source = sport;
3474     uh->dest = dport;
3475     - uh->len = htons(skb->len + esp->tailen
3476     - - skb_transport_offset(skb));
3477     + uh->len = htons(len);
3478     uh->check = 0;
3479    
3480     switch (encap_type) {
3481     @@ -259,6 +263,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
3482    
3483     *skb_mac_header(skb) = IPPROTO_UDP;
3484     esp->esph = esph;
3485     +
3486     + return 0;
3487     }
3488    
3489     int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
3490     @@ -272,8 +278,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
3491     int tailen = esp->tailen;
3492    
3493     /* this is non-NULL only with UDP Encapsulation */
3494     - if (x->encap)
3495     - esp_output_udp_encap(x, skb, esp);
3496     + if (x->encap) {
3497     + int err = esp_output_udp_encap(x, skb, esp);
3498     +
3499     + if (err < 0)
3500     + return err;
3501     + }
3502    
3503     if (!skb_cloned(skb)) {
3504     if (tailen <= skb_tailroom(skb)) {
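The esp_output_udp_encap() change above computes the encapsulated length up front and fails with -EMSGSIZE when it cannot fit, instead of letting htons() silently truncate a value that does not fit in the 16-bit UDP length field. A standalone sketch of that guard; the 20-byte header size and the IP_MAX_MTU value mirror IPv4, the rest is illustrative:

#include <arpa/inet.h>
#include <stdio.h>

#define IP_MAX_MTU 0xFFFFU

/* Returns 0 and writes the network-order length, or -1 if it cannot fit. */
static int fill_udp_len(unsigned int payload_len, unsigned short *len_field)
{
	if (payload_len + 20 /* IPv4 header */ >= IP_MAX_MTU)
		return -1; /* the kernel path returns -EMSGSIZE here */

	*len_field = htons((unsigned short)payload_len);
	return 0;
}

int main(void)
{
	unsigned short len;

	printf("oversized: %d\n", fill_udp_len(70000, &len)); /* rejected */
	printf("normal:    %d\n", fill_udp_len(1400, &len));  /* accepted */
	return 0;
}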
3505     diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
3506     index 40a7cd56e008..808f8d15c519 100644
3507     --- a/net/ipv4/ip_vti.c
3508     +++ b/net/ipv4/ip_vti.c
3509     @@ -659,9 +659,9 @@ static int __init vti_init(void)
3510     return err;
3511    
3512     rtnl_link_failed:
3513     - xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
3514     -xfrm_tunnel_failed:
3515     xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
3516     +xfrm_tunnel_failed:
3517     + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
3518     xfrm_proto_comp_failed:
3519     xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
3520     xfrm_proto_ah_failed:
3521     @@ -676,6 +676,7 @@ pernet_dev_failed:
3522     static void __exit vti_fini(void)
3523     {
3524     rtnl_link_unregister(&vti_link_ops);
3525     + xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
3526     xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
3527     xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
3528     xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
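The vti_init()/vti_fini() fix above restores the usual shape of error unwinding: each failure label undoes only the steps that already succeeded, in the reverse order they were taken, and the module exit path tears things down symmetrically. A generic sketch of that goto-unwind shape, with placeholder step functions rather than the xfrm registration calls:

#include <stdio.h>

static int register_a(void) { puts("a registered"); return 0; }
static int register_b(void) { puts("b registered"); return 0; }
static int register_c(void) { puts("c failed");     return -1; }
static void unregister_b(void) { puts("b unregistered"); }
static void unregister_a(void) { puts("a unregistered"); }

static int init_all(void)
{
	int err;

	err = register_a();
	if (err)
		goto out;
	err = register_b();
	if (err)
		goto unreg_a;
	err = register_c();
	if (err)
		goto unreg_b;
	return 0;

unreg_b:
	unregister_b(); /* undo the newest step first... */
unreg_a:
	unregister_a(); /* ...and the oldest step last */
out:
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}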
3529     diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
3530     index d73a6d6652f6..2b144b92ae46 100644
3531     --- a/net/ipv4/xfrm4_policy.c
3532     +++ b/net/ipv4/xfrm4_policy.c
3533     @@ -111,7 +111,8 @@ static void
3534     _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3535     {
3536     const struct iphdr *iph = ip_hdr(skb);
3537     - u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
3538     + int ihl = iph->ihl;
3539     + u8 *xprth = skb_network_header(skb) + ihl * 4;
3540     struct flowi4 *fl4 = &fl->u.ip4;
3541     int oif = 0;
3542    
3543     @@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3544     fl4->flowi4_mark = skb->mark;
3545     fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3546    
3547     + fl4->flowi4_proto = iph->protocol;
3548     + fl4->daddr = reverse ? iph->saddr : iph->daddr;
3549     + fl4->saddr = reverse ? iph->daddr : iph->saddr;
3550     + fl4->flowi4_tos = iph->tos;
3551     +
3552     if (!ip_is_fragment(iph)) {
3553     switch (iph->protocol) {
3554     case IPPROTO_UDP:
3555     @@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3556     pskb_may_pull(skb, xprth + 4 - skb->data)) {
3557     __be16 *ports;
3558    
3559     - xprth = skb_network_header(skb) + iph->ihl * 4;
3560     + xprth = skb_network_header(skb) + ihl * 4;
3561     ports = (__be16 *)xprth;
3562    
3563     fl4->fl4_sport = ports[!!reverse];
3564     @@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3565     pskb_may_pull(skb, xprth + 2 - skb->data)) {
3566     u8 *icmp;
3567    
3568     - xprth = skb_network_header(skb) + iph->ihl * 4;
3569     + xprth = skb_network_header(skb) + ihl * 4;
3570     icmp = xprth;
3571    
3572     fl4->fl4_icmp_type = icmp[0];
3573     @@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3574     pskb_may_pull(skb, xprth + 4 - skb->data)) {
3575     __be32 *ehdr;
3576    
3577     - xprth = skb_network_header(skb) + iph->ihl * 4;
3578     + xprth = skb_network_header(skb) + ihl * 4;
3579     ehdr = (__be32 *)xprth;
3580    
3581     fl4->fl4_ipsec_spi = ehdr[0];
3582     @@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3583     pskb_may_pull(skb, xprth + 8 - skb->data)) {
3584     __be32 *ah_hdr;
3585    
3586     - xprth = skb_network_header(skb) + iph->ihl * 4;
3587     + xprth = skb_network_header(skb) + ihl * 4;
3588     ah_hdr = (__be32 *)xprth;
3589    
3590     fl4->fl4_ipsec_spi = ah_hdr[1];
3591     @@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3592     pskb_may_pull(skb, xprth + 4 - skb->data)) {
3593     __be16 *ipcomp_hdr;
3594    
3595     - xprth = skb_network_header(skb) + iph->ihl * 4;
3596     + xprth = skb_network_header(skb) + ihl * 4;
3597     ipcomp_hdr = (__be16 *)xprth;
3598    
3599     fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3600     @@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3601     __be16 *greflags;
3602     __be32 *gre_hdr;
3603    
3604     - xprth = skb_network_header(skb) + iph->ihl * 4;
3605     + xprth = skb_network_header(skb) + ihl * 4;
3606     greflags = (__be16 *)xprth;
3607     gre_hdr = (__be32 *)xprth;
3608    
3609     @@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
3610     break;
3611     }
3612     }
3613     - fl4->flowi4_proto = iph->protocol;
3614     - fl4->daddr = reverse ? iph->saddr : iph->daddr;
3615     - fl4->saddr = reverse ? iph->daddr : iph->saddr;
3616     - fl4->flowi4_tos = iph->tos;
3617     }
3618    
3619     static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
3620     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
3621     index 3e2a9bd3459c..a6c0479c1d55 100644
3622     --- a/net/ipv6/ip6_fib.c
3623     +++ b/net/ipv6/ip6_fib.c
3624     @@ -877,6 +877,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
3625     {
3626     int cpu;
3627    
3628     + /* Make sure rt6_make_pcpu_route() wont add other percpu routes
3629     + * while we are cleaning them here.
3630     + */
3631     + f6i->fib6_destroying = 1;
3632     + mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
3633     +
3634     /* release the reference to this fib entry from
3635     * all of its cached pcpu routes
3636     */
3637     @@ -900,6 +906,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
3638     {
3639     struct fib6_table *table = rt->fib6_table;
3640    
3641     + if (rt->rt6i_pcpu)
3642     + fib6_drop_pcpu_from(rt, table);
3643     +
3644     if (atomic_read(&rt->fib6_ref) != 1) {
3645     /* This route is used as dummy address holder in some split
3646     * nodes. It is not leaked, but it still holds other resources,
3647     @@ -921,9 +930,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
3648     fn = rcu_dereference_protected(fn->parent,
3649     lockdep_is_held(&table->tb6_lock));
3650     }
3651     -
3652     - if (rt->rt6i_pcpu)
3653     - fib6_drop_pcpu_from(rt, table);
3654     }
3655     }
3656    
3657     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3658     index 2e519f7b983c..bf0940c42810 100644
3659     --- a/net/ipv6/route.c
3660     +++ b/net/ipv6/route.c
3661     @@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
3662     int iif, int type, u32 portid, u32 seq,
3663     unsigned int flags);
3664     static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
3665     - struct in6_addr *daddr,
3666     - struct in6_addr *saddr);
3667     + const struct in6_addr *daddr,
3668     + const struct in6_addr *saddr);
3669    
3670     #ifdef CONFIG_IPV6_ROUTE_INFO
3671     static struct fib6_info *rt6_add_route_info(struct net *net,
3672     @@ -1268,6 +1268,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
3673     prev = cmpxchg(p, NULL, pcpu_rt);
3674     BUG_ON(prev);
3675    
3676     + if (rt->fib6_destroying) {
3677     + struct fib6_info *from;
3678     +
3679     + from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
3680     + fib6_info_release(from);
3681     + }
3682     +
3683     return pcpu_rt;
3684     }
3685    
3686     @@ -1542,31 +1549,44 @@ out:
3687     * Caller has to hold rcu_read_lock()
3688     */
3689     static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
3690     - struct in6_addr *daddr,
3691     - struct in6_addr *saddr)
3692     + const struct in6_addr *daddr,
3693     + const struct in6_addr *saddr)
3694     {
3695     + const struct in6_addr *src_key = NULL;
3696     struct rt6_exception_bucket *bucket;
3697     - struct in6_addr *src_key = NULL;
3698     struct rt6_exception *rt6_ex;
3699     struct rt6_info *res = NULL;
3700    
3701     - bucket = rcu_dereference(rt->rt6i_exception_bucket);
3702     -
3703     #ifdef CONFIG_IPV6_SUBTREES
3704     /* rt6i_src.plen != 0 indicates rt is in subtree
3705     * and exception table is indexed by a hash of
3706     * both rt6i_dst and rt6i_src.
3707     - * Otherwise, the exception table is indexed by
3708     - * a hash of only rt6i_dst.
3709     + * However, the src addr used to create the hash
3710     + * might not be exactly the passed in saddr which
3711     + * is a /128 addr from the flow.
3712     + * So we need to use f6i->fib6_src to redo lookup
3713     + * if the passed in saddr does not find anything.
3714     + * (See the logic in ip6_rt_cache_alloc() on how
3715     + * rt->rt6i_src is updated.)
3716     */
3717     if (rt->fib6_src.plen)
3718     src_key = saddr;
3719     +find_ex:
3720     #endif
3721     + bucket = rcu_dereference(rt->rt6i_exception_bucket);
3722     rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
3723    
3724     if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
3725     res = rt6_ex->rt6i;
3726    
3727     +#ifdef CONFIG_IPV6_SUBTREES
3728     + /* Use fib6_src as src_key and redo lookup */
3729     + if (!res && src_key && src_key != &rt->fib6_src.addr) {
3730     + src_key = &rt->fib6_src.addr;
3731     + goto find_ex;
3732     + }
3733     +#endif
3734     +
3735     return res;
3736     }
3737    
3738     @@ -2650,10 +2670,8 @@ out:
3739     u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
3740     struct in6_addr *saddr)
3741     {
3742     - struct rt6_exception_bucket *bucket;
3743     - struct rt6_exception *rt6_ex;
3744     - struct in6_addr *src_key;
3745     struct inet6_dev *idev;
3746     + struct rt6_info *rt;
3747     u32 mtu = 0;
3748    
3749     if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3750     @@ -2662,18 +2680,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
3751     goto out;
3752     }
3753    
3754     - src_key = NULL;
3755     -#ifdef CONFIG_IPV6_SUBTREES
3756     - if (f6i->fib6_src.plen)
3757     - src_key = saddr;
3758     -#endif
3759     -
3760     - bucket = rcu_dereference(f6i->rt6i_exception_bucket);
3761     - rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
3762     - if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
3763     - mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
3764     -
3765     - if (likely(!mtu)) {
3766     + rt = rt6_find_cached_rt(f6i, daddr, saddr);
3767     + if (unlikely(rt)) {
3768     + mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3769     + } else {
3770     struct net_device *dev = fib6_info_nh_dev(f6i);
3771    
3772     mtu = IPV6_MIN_MTU;
3773     diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
3774     index bc65db782bfb..d9e5f6808811 100644
3775     --- a/net/ipv6/xfrm6_tunnel.c
3776     +++ b/net/ipv6/xfrm6_tunnel.c
3777     @@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
3778     unsigned int i;
3779    
3780     xfrm_flush_gc();
3781     - xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
3782     + xfrm_state_flush(net, 0, false, true);
3783    
3784     for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
3785     WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
3786     @@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
3787     xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
3788     xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
3789     unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
3790     + /* Someone maybe has gotten the xfrm6_tunnel_spi.
3791     + * So need to wait it.
3792     + */
3793     + rcu_barrier();
3794     kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
3795     }
3796    
3797     diff --git a/net/key/af_key.c b/net/key/af_key.c
3798     index 7d4bed955060..0b79c9aa8eb1 100644
3799     --- a/net/key/af_key.c
3800     +++ b/net/key/af_key.c
3801     @@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
3802    
3803     if (rq->sadb_x_ipsecrequest_mode == 0)
3804     return -EINVAL;
3805     + if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
3806     + return -EINVAL;
3807    
3808     - t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
3809     + t->id.proto = rq->sadb_x_ipsecrequest_proto;
3810     if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
3811     return -EINVAL;
3812     t->mode = mode;
3813     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
3814     index 3a0171a65db3..152d4365f961 100644
3815     --- a/net/mac80211/iface.c
3816     +++ b/net/mac80211/iface.c
3817     @@ -1910,6 +1910,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
3818     list_del_rcu(&sdata->list);
3819     mutex_unlock(&sdata->local->iflist_mtx);
3820    
3821     + if (sdata->vif.txq)
3822     + ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
3823     +
3824     synchronize_rcu();
3825    
3826     if (sdata->dev) {
3827     diff --git a/net/tipc/core.c b/net/tipc/core.c
3828     index 5b38f5164281..d7b0688c98dd 100644
3829     --- a/net/tipc/core.c
3830     +++ b/net/tipc/core.c
3831     @@ -66,6 +66,10 @@ static int __net_init tipc_init_net(struct net *net)
3832     INIT_LIST_HEAD(&tn->node_list);
3833     spin_lock_init(&tn->node_list_lock);
3834    
3835     + err = tipc_socket_init();
3836     + if (err)
3837     + goto out_socket;
3838     +
3839     err = tipc_sk_rht_init(net);
3840     if (err)
3841     goto out_sk_rht;
3842     @@ -92,6 +96,8 @@ out_subscr:
3843     out_nametbl:
3844     tipc_sk_rht_destroy(net);
3845     out_sk_rht:
3846     + tipc_socket_stop();
3847     +out_socket:
3848     return err;
3849     }
3850    
3851     @@ -102,6 +108,7 @@ static void __net_exit tipc_exit_net(struct net *net)
3852     tipc_bcast_stop(net);
3853     tipc_nametbl_stop(net);
3854     tipc_sk_rht_destroy(net);
3855     + tipc_socket_stop();
3856     }
3857    
3858     static struct pernet_operations tipc_net_ops = {
3859     @@ -129,10 +136,6 @@ static int __init tipc_init(void)
3860     if (err)
3861     goto out_netlink_compat;
3862    
3863     - err = tipc_socket_init();
3864     - if (err)
3865     - goto out_socket;
3866     -
3867     err = tipc_register_sysctl();
3868     if (err)
3869     goto out_sysctl;
3870     @@ -152,8 +155,6 @@ out_bearer:
3871     out_pernet:
3872     tipc_unregister_sysctl();
3873     out_sysctl:
3874     - tipc_socket_stop();
3875     -out_socket:
3876     tipc_netlink_compat_stop();
3877     out_netlink_compat:
3878     tipc_netlink_stop();
3879     @@ -168,7 +169,6 @@ static void __exit tipc_exit(void)
3880     unregister_pernet_subsys(&tipc_net_ops);
3881     tipc_netlink_stop();
3882     tipc_netlink_compat_stop();
3883     - tipc_socket_stop();
3884     tipc_unregister_sysctl();
3885    
3886     pr_info("Deactivated\n");
3887     diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
3888     index 15eb5d3d4750..96ab344f17bb 100644
3889     --- a/net/vmw_vsock/virtio_transport.c
3890     +++ b/net/vmw_vsock/virtio_transport.c
3891     @@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
3892     if (!virtio_vsock_workqueue)
3893     return -ENOMEM;
3894    
3895     - ret = register_virtio_driver(&virtio_vsock_driver);
3896     + ret = vsock_core_init(&virtio_transport.transport);
3897     if (ret)
3898     goto out_wq;
3899    
3900     - ret = vsock_core_init(&virtio_transport.transport);
3901     + ret = register_virtio_driver(&virtio_vsock_driver);
3902     if (ret)
3903     - goto out_vdr;
3904     + goto out_vci;
3905    
3906     return 0;
3907    
3908     -out_vdr:
3909     - unregister_virtio_driver(&virtio_vsock_driver);
3910     +out_vci:
3911     + vsock_core_exit();
3912     out_wq:
3913     destroy_workqueue(virtio_vsock_workqueue);
3914     return ret;
3915     -
3916     }
3917    
3918     static void __exit virtio_vsock_exit(void)
3919     {
3920     - vsock_core_exit();
3921     unregister_virtio_driver(&virtio_vsock_driver);
3922     + vsock_core_exit();
3923     destroy_workqueue(virtio_vsock_workqueue);
3924     }
3925    
3926     diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
3927     index 602715fc9a75..f3f3d06cb6d8 100644
3928     --- a/net/vmw_vsock/virtio_transport_common.c
3929     +++ b/net/vmw_vsock/virtio_transport_common.c
3930     @@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
3931    
3932     void virtio_transport_release(struct vsock_sock *vsk)
3933     {
3934     + struct virtio_vsock_sock *vvs = vsk->trans;
3935     + struct virtio_vsock_pkt *pkt, *tmp;
3936     struct sock *sk = &vsk->sk;
3937     bool remove_sock = true;
3938    
3939     lock_sock(sk);
3940     if (sk->sk_type == SOCK_STREAM)
3941     remove_sock = virtio_transport_close(vsk);
3942     +
3943     + list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
3944     + list_del(&pkt->list);
3945     + virtio_transport_free_pkt(pkt);
3946     + }
3947     release_sock(sk);
3948    
3949     if (remove_sock)
3950     diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
3951     index 82723ef44db3..555ee2aca6c0 100644
3952     --- a/net/xfrm/xfrm_interface.c
3953     +++ b/net/xfrm/xfrm_interface.c
3954     @@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
3955     return NULL;
3956     }
3957    
3958     -static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
3959     +static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
3960     + unsigned short family)
3961     {
3962     struct xfrmi_net *xfrmn;
3963     - int ifindex;
3964     struct xfrm_if *xi;
3965     + int ifindex = 0;
3966    
3967     if (!secpath_exists(skb) || !skb->dev)
3968     return NULL;
3969    
3970     + switch (family) {
3971     + case AF_INET6:
3972     + ifindex = inet6_sdif(skb);
3973     + break;
3974     + case AF_INET:
3975     + ifindex = inet_sdif(skb);
3976     + break;
3977     + }
3978     + if (!ifindex)
3979     + ifindex = skb->dev->ifindex;
3980     +
3981     xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
3982     - ifindex = skb->dev->ifindex;
3983    
3984     for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
3985     if (ifindex == xi->dev->ifindex &&
3986     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3987     index bf5d59270f79..ce1b262ce964 100644
3988     --- a/net/xfrm/xfrm_policy.c
3989     +++ b/net/xfrm/xfrm_policy.c
3990     @@ -2339,7 +2339,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3991     ifcb = xfrm_if_get_cb();
3992    
3993     if (ifcb) {
3994     - xi = ifcb->decode_session(skb);
3995     + xi = ifcb->decode_session(skb, family);
3996     if (xi) {
3997     if_id = xi->p.if_id;
3998     net = xi->net;
3999     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
4000     index 3f729cd512af..11e09eb138d6 100644
4001     --- a/net/xfrm/xfrm_state.c
4002     +++ b/net/xfrm/xfrm_state.c
4003     @@ -2386,7 +2386,7 @@ void xfrm_state_fini(struct net *net)
4004    
4005     flush_work(&net->xfrm.state_hash_work);
4006     flush_work(&xfrm_state_gc_work);
4007     - xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
4008     + xfrm_state_flush(net, 0, false, true);
4009    
4010     WARN_ON(!list_empty(&net->xfrm.state_all));
4011    
4012     diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
4013     index 7e4904b93004..2122f89f6155 100644
4014     --- a/net/xfrm/xfrm_user.c
4015     +++ b/net/xfrm/xfrm_user.c
4016     @@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
4017     ret = verify_policy_dir(p->dir);
4018     if (ret)
4019     return ret;
4020     - if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
4021     + if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
4022     return -EINVAL;
4023    
4024     return 0;
4025     @@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4026     return -EINVAL;
4027     }
4028    
4029     - switch (ut[i].id.proto) {
4030     - case IPPROTO_AH:
4031     - case IPPROTO_ESP:
4032     - case IPPROTO_COMP:
4033     -#if IS_ENABLED(CONFIG_IPV6)
4034     - case IPPROTO_ROUTING:
4035     - case IPPROTO_DSTOPTS:
4036     -#endif
4037     - case IPSEC_PROTO_ANY:
4038     - break;
4039     - default:
4040     + if (!xfrm_id_proto_valid(ut[i].id.proto))
4041     return -EINVAL;
4042     - }
4043     -
4044     }
4045    
4046     return 0;
4047     diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
4048     index e09fe4d7307c..40e3a098f6fb 100644
4049     --- a/security/apparmor/apparmorfs.c
4050     +++ b/security/apparmor/apparmorfs.c
4051     @@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
4052     return 0;
4053     }
4054    
4055     -static void aafs_evict_inode(struct inode *inode)
4056     +static void aafs_i_callback(struct rcu_head *head)
4057     {
4058     - truncate_inode_pages_final(&inode->i_data);
4059     - clear_inode(inode);
4060     + struct inode *inode = container_of(head, struct inode, i_rcu);
4061     if (S_ISLNK(inode->i_mode))
4062     kfree(inode->i_link);
4063     + free_inode_nonrcu(inode);
4064     +}
4065     +
4066     +static void aafs_destroy_inode(struct inode *inode)
4067     +{
4068     + call_rcu(&inode->i_rcu, aafs_i_callback);
4069     }
4070    
4071     static const struct super_operations aafs_super_ops = {
4072     .statfs = simple_statfs,
4073     - .evict_inode = aafs_evict_inode,
4074     + .destroy_inode = aafs_destroy_inode,
4075     .show_path = aafs_show_path,
4076     };
4077    
4078     diff --git a/security/inode.c b/security/inode.c
4079     index 8dd9ca8848e4..829f15672e01 100644
4080     --- a/security/inode.c
4081     +++ b/security/inode.c
4082     @@ -26,17 +26,22 @@
4083     static struct vfsmount *mount;
4084     static int mount_count;
4085    
4086     -static void securityfs_evict_inode(struct inode *inode)
4087     +static void securityfs_i_callback(struct rcu_head *head)
4088     {
4089     - truncate_inode_pages_final(&inode->i_data);
4090     - clear_inode(inode);
4091     + struct inode *inode = container_of(head, struct inode, i_rcu);
4092     if (S_ISLNK(inode->i_mode))
4093     kfree(inode->i_link);
4094     + free_inode_nonrcu(inode);
4095     +}
4096     +
4097     +static void securityfs_destroy_inode(struct inode *inode)
4098     +{
4099     + call_rcu(&inode->i_rcu, securityfs_i_callback);
4100     }
4101    
4102     static const struct super_operations securityfs_super_operations = {
4103     .statfs = simple_statfs,
4104     - .evict_inode = securityfs_evict_inode,
4105     + .destroy_inode = securityfs_destroy_inode,
4106     };
4107    
4108     static int fill_super(struct super_block *sb, void *data, int silent)
4109     diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4110     index 53f8be0f4a1f..88158239622b 100644
4111     --- a/tools/objtool/Makefile
4112     +++ b/tools/objtool/Makefile
4113     @@ -7,11 +7,12 @@ ARCH := x86
4114     endif
4115    
4116     # always use the host compiler
4117     +HOSTAR ?= ar
4118     HOSTCC ?= gcc
4119     HOSTLD ?= ld
4120     +AR = $(HOSTAR)
4121     CC = $(HOSTCC)
4122     LD = $(HOSTLD)
4123     -AR = ar
4124    
4125     ifeq ($(srctree),)
4126     srctree := $(patsubst %/,%,$(dir $(CURDIR)))
4127     diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
4128     index 44195514b19e..fa56fde6e8d8 100644
4129     --- a/tools/perf/bench/numa.c
4130     +++ b/tools/perf/bench/numa.c
4131     @@ -38,6 +38,10 @@
4132     #include <numa.h>
4133     #include <numaif.h>
4134    
4135     +#ifndef RUSAGE_THREAD
4136     +# define RUSAGE_THREAD 1
4137     +#endif
4138     +
4139     /*
4140     * Regular printout to the terminal, supressed if -q is specified:
4141     */
4142     diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4143     index 0bc3e6e93c31..4357141c7c92 100644
4144     --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4145     +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4146     @@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
4147     INTEL_PT_STATE_NO_IP,
4148     INTEL_PT_STATE_ERR_RESYNC,
4149     INTEL_PT_STATE_IN_SYNC,
4150     + INTEL_PT_STATE_TNT_CONT,
4151     INTEL_PT_STATE_TNT,
4152     INTEL_PT_STATE_TIP,
4153     INTEL_PT_STATE_TIP_PGD,
4154     @@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
4155     case INTEL_PT_STATE_NO_IP:
4156     case INTEL_PT_STATE_ERR_RESYNC:
4157     case INTEL_PT_STATE_IN_SYNC:
4158     - case INTEL_PT_STATE_TNT:
4159     + case INTEL_PT_STATE_TNT_CONT:
4160     return true;
4161     + case INTEL_PT_STATE_TNT:
4162     case INTEL_PT_STATE_TIP:
4163     case INTEL_PT_STATE_TIP_PGD:
4164     case INTEL_PT_STATE_FUP:
4165     @@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
4166     timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
4167     masked_timestamp = timestamp & decoder->period_mask;
4168     if (decoder->continuous_period) {
4169     - if (masked_timestamp != decoder->last_masked_timestamp)
4170     + if (masked_timestamp > decoder->last_masked_timestamp)
4171     return 1;
4172     } else {
4173     timestamp += 1;
4174     masked_timestamp = timestamp & decoder->period_mask;
4175     - if (masked_timestamp != decoder->last_masked_timestamp) {
4176     + if (masked_timestamp > decoder->last_masked_timestamp) {
4177     decoder->last_masked_timestamp = masked_timestamp;
4178     decoder->continuous_period = true;
4179     }
4180     }
4181     +
4182     + if (masked_timestamp < decoder->last_masked_timestamp)
4183     + return decoder->period_ticks;
4184     +
4185     return decoder->period_ticks - (timestamp - masked_timestamp);
4186     }
4187    
4188     @@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
4189     case INTEL_PT_PERIOD_TICKS:
4190     timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
4191     masked_timestamp = timestamp & decoder->period_mask;
4192     - decoder->last_masked_timestamp = masked_timestamp;
4193     + if (masked_timestamp > decoder->last_masked_timestamp)
4194     + decoder->last_masked_timestamp = masked_timestamp;
4195     + else
4196     + decoder->last_masked_timestamp += decoder->period_ticks;
4197     break;
4198     case INTEL_PT_PERIOD_NONE:
4199     case INTEL_PT_PERIOD_MTC:
4200     @@ -1249,7 +1258,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4201     return -ENOENT;
4202     }
4203     decoder->tnt.count -= 1;
4204     - if (!decoder->tnt.count)
4205     + if (decoder->tnt.count)
4206     + decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
4207     + else
4208     decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4209     decoder->tnt.payload <<= 1;
4210     decoder->state.from_ip = decoder->ip;
4211     @@ -1280,7 +1291,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4212    
4213     if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
4214     decoder->tnt.count -= 1;
4215     - if (!decoder->tnt.count)
4216     + if (decoder->tnt.count)
4217     + decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
4218     + else
4219     decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4220     if (decoder->tnt.payload & BIT63) {
4221     decoder->tnt.payload <<= 1;
4222     @@ -1300,8 +1313,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4223     return 0;
4224     }
4225     decoder->ip += intel_pt_insn.length;
4226     - if (!decoder->tnt.count)
4227     + if (!decoder->tnt.count) {
4228     + decoder->sample_timestamp = decoder->timestamp;
4229     + decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
4230     return -EAGAIN;
4231     + }
4232     decoder->tnt.payload <<= 1;
4233     continue;
4234     }
4235     @@ -2349,6 +2365,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
4236     err = intel_pt_walk_trace(decoder);
4237     break;
4238     case INTEL_PT_STATE_TNT:
4239     + case INTEL_PT_STATE_TNT_CONT:
4240     err = intel_pt_walk_tnt(decoder);
4241     if (err == -EAGAIN)
4242     err = intel_pt_walk_trace(decoder);
4243     diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
4244     index 294fc18aba2a..9db5a7378f40 100644
4245     --- a/tools/testing/selftests/bpf/test_verifier.c
4246     +++ b/tools/testing/selftests/bpf/test_verifier.c
4247     @@ -32,7 +32,6 @@
4248     #include <linux/if_ether.h>
4249    
4250     #include <bpf/bpf.h>
4251     -#include <bpf/libbpf.h>
4252    
4253     #ifdef HAVE_GENHDR
4254     # include "autoconf.h"
4255     @@ -57,7 +56,6 @@
4256    
4257     #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
4258     static bool unpriv_disabled = false;
4259     -static int skips;
4260    
4261     struct bpf_test {
4262     const char *descr;
4263     @@ -12772,11 +12770,6 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4264     fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
4265     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
4266     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
4267     - if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
4268     - printf("SKIP (unsupported program type %d)\n", prog_type);
4269     - skips++;
4270     - goto close_fds;
4271     - }
4272    
4273     expected_ret = unpriv && test->result_unpriv != UNDEF ?
4274     test->result_unpriv : test->result;
4275     @@ -12912,7 +12905,7 @@ static void get_unpriv_disabled()
4276    
4277     static int do_test(bool unpriv, unsigned int from, unsigned int to)
4278     {
4279     - int i, passes = 0, errors = 0;
4280     + int i, passes = 0, errors = 0, skips = 0;
4281    
4282     for (i = from; i < to; i++) {
4283     struct bpf_test *test = &tests[i];
4284     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
4285     index 1415e36fed3d..fef3527af3bd 100644
4286     --- a/virt/kvm/arm/arm.c
4287     +++ b/virt/kvm/arm/arm.c
4288     @@ -949,7 +949,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
4289     static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
4290     const struct kvm_vcpu_init *init)
4291     {
4292     - unsigned int i;
4293     + unsigned int i, ret;
4294     int phys_target = kvm_target_cpu();
4295    
4296     if (init->target != phys_target)
4297     @@ -984,9 +984,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
4298     vcpu->arch.target = phys_target;
4299    
4300     /* Now we know what it is, we can reset it. */
4301     - return kvm_reset_vcpu(vcpu);
4302     -}
4303     + ret = kvm_reset_vcpu(vcpu);
4304     + if (ret) {
4305     + vcpu->arch.target = -1;
4306     + bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
4307     + }
4308    
4309     + return ret;
4310     +}
4311    
4312     static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
4313     struct kvm_vcpu_init *init)