Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0150-4.19.51-all-fixes.patch



Revision 3429
Fri Aug 2 11:47:57 2019 UTC by niro
File size: 102204 bytes
-linux-4.19.51
1 niro 3429 diff --git a/Makefile b/Makefile
2     index f7e7e365e2ff..dd4be2f32b88 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 50
10     +SUBLEVEL = 51
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
15     index cdda614e417e..a370857beac0 100644
16     --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
17     +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
18     @@ -106,6 +106,7 @@
19     regulator-name = "PVDD_APIO_1V8";
20     regulator-min-microvolt = <1800000>;
21     regulator-max-microvolt = <1800000>;
22     + regulator-always-on;
23     };
24    
25     ldo3_reg: LDO3 {
26     @@ -144,6 +145,7 @@
27     regulator-name = "PVDD_ABB_1V8";
28     regulator-min-microvolt = <1800000>;
29     regulator-max-microvolt = <1800000>;
30     + regulator-always-on;
31     };
32    
33     ldo9_reg: LDO9 {
34     diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
35     index 7fae2ffb76fe..ab522c2da6df 100644
36     --- a/arch/arm/boot/dts/imx50.dtsi
37     +++ b/arch/arm/boot/dts/imx50.dtsi
38     @@ -420,7 +420,7 @@
39     reg = <0x63fb0000 0x4000>;
40     interrupts = <6>;
41     clocks = <&clks IMX5_CLK_SDMA_GATE>,
42     - <&clks IMX5_CLK_SDMA_GATE>;
43     + <&clks IMX5_CLK_AHB>;
44     clock-names = "ipg", "ahb";
45     #dma-cells = <3>;
46     fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
47     diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
48     index 5c4ba91e43ba..ef2abc097843 100644
49     --- a/arch/arm/boot/dts/imx51.dtsi
50     +++ b/arch/arm/boot/dts/imx51.dtsi
51     @@ -481,7 +481,7 @@
52     reg = <0x83fb0000 0x4000>;
53     interrupts = <6>;
54     clocks = <&clks IMX5_CLK_SDMA_GATE>,
55     - <&clks IMX5_CLK_SDMA_GATE>;
56     + <&clks IMX5_CLK_AHB>;
57     clock-names = "ipg", "ahb";
58     #dma-cells = <3>;
59     fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
60     diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
61     index 6386185ae234..b6b0818343c4 100644
62     --- a/arch/arm/boot/dts/imx53.dtsi
63     +++ b/arch/arm/boot/dts/imx53.dtsi
64     @@ -701,7 +701,7 @@
65     reg = <0x63fb0000 0x4000>;
66     interrupts = <6>;
67     clocks = <&clks IMX5_CLK_SDMA_GATE>,
68     - <&clks IMX5_CLK_SDMA_GATE>;
69     + <&clks IMX5_CLK_AHB>;
70     clock-names = "ipg", "ahb";
71     #dma-cells = <3>;
72     fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
73     diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
74     index 61d2d26afbf4..00d44a60972f 100644
75     --- a/arch/arm/boot/dts/imx6qdl.dtsi
76     +++ b/arch/arm/boot/dts/imx6qdl.dtsi
77     @@ -905,7 +905,7 @@
78     compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
79     reg = <0x020ec000 0x4000>;
80     interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
81     - clocks = <&clks IMX6QDL_CLK_SDMA>,
82     + clocks = <&clks IMX6QDL_CLK_IPG>,
83     <&clks IMX6QDL_CLK_SDMA>;
84     clock-names = "ipg", "ahb";
85     #dma-cells = <3>;
86     diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
87     index 7a4f5dace902..2fa88c6f1882 100644
88     --- a/arch/arm/boot/dts/imx6sl.dtsi
89     +++ b/arch/arm/boot/dts/imx6sl.dtsi
90     @@ -739,7 +739,7 @@
91     reg = <0x020ec000 0x4000>;
92     interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
93     clocks = <&clks IMX6SL_CLK_SDMA>,
94     - <&clks IMX6SL_CLK_SDMA>;
95     + <&clks IMX6SL_CLK_AHB>;
96     clock-names = "ipg", "ahb";
97     #dma-cells = <3>;
98     /* imx6sl reuses imx6q sdma firmware */
99     diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
100     index 3e6ffaf5f104..7c7d5c47578e 100644
101     --- a/arch/arm/boot/dts/imx6sll.dtsi
102     +++ b/arch/arm/boot/dts/imx6sll.dtsi
103     @@ -591,7 +591,7 @@
104     compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma";
105     reg = <0x020ec000 0x4000>;
106     interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
107     - clocks = <&clks IMX6SLL_CLK_SDMA>,
108     + clocks = <&clks IMX6SLL_CLK_IPG>,
109     <&clks IMX6SLL_CLK_SDMA>;
110     clock-names = "ipg", "ahb";
111     #dma-cells = <3>;
112     diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
113     index 50083cecc6c9..7b62e6fb47eb 100644
114     --- a/arch/arm/boot/dts/imx6sx.dtsi
115     +++ b/arch/arm/boot/dts/imx6sx.dtsi
116     @@ -803,7 +803,7 @@
117     compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
118     reg = <0x020ec000 0x4000>;
119     interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
120     - clocks = <&clks IMX6SX_CLK_SDMA>,
121     + clocks = <&clks IMX6SX_CLK_IPG>,
122     <&clks IMX6SX_CLK_SDMA>;
123     clock-names = "ipg", "ahb";
124     #dma-cells = <3>;
125     diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
126     index 6dc0b569acdf..2366f093cc76 100644
127     --- a/arch/arm/boot/dts/imx6ul.dtsi
128     +++ b/arch/arm/boot/dts/imx6ul.dtsi
129     @@ -707,7 +707,7 @@
130     "fsl,imx35-sdma";
131     reg = <0x020ec000 0x4000>;
132     interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
133     - clocks = <&clks IMX6UL_CLK_SDMA>,
134     + clocks = <&clks IMX6UL_CLK_IPG>,
135     <&clks IMX6UL_CLK_SDMA>;
136     clock-names = "ipg", "ahb";
137     #dma-cells = <3>;
138     diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
139     index a052198f6e96..a7f697b0290f 100644
140     --- a/arch/arm/boot/dts/imx7s.dtsi
141     +++ b/arch/arm/boot/dts/imx7s.dtsi
142     @@ -1050,8 +1050,8 @@
143     compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
144     reg = <0x30bd0000 0x10000>;
145     interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
146     - clocks = <&clks IMX7D_SDMA_CORE_CLK>,
147     - <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
148     + clocks = <&clks IMX7D_IPG_ROOT_CLK>,
149     + <&clks IMX7D_SDMA_CORE_CLK>;
150     clock-names = "ipg", "ahb";
151     #dma-cells = <3>;
152     fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
153     diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
154     index cba23eaa6072..7a88f160b1fb 100644
155     --- a/arch/arm/include/asm/hardirq.h
156     +++ b/arch/arm/include/asm/hardirq.h
157     @@ -6,6 +6,7 @@
158     #include <linux/threads.h>
159     #include <asm/irq.h>
160    
161     +/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
162     #define NR_IPI 7
163    
164     typedef struct {
165     diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
166     index a3ce7c5365fa..bada66ef4419 100644
167     --- a/arch/arm/kernel/smp.c
168     +++ b/arch/arm/kernel/smp.c
169     @@ -76,6 +76,10 @@ enum ipi_msg_type {
170     IPI_CPU_STOP,
171     IPI_IRQ_WORK,
172     IPI_COMPLETION,
173     + /*
174     + * CPU_BACKTRACE is special and not included in NR_IPI
175     + * or tracable with trace_ipi_*
176     + */
177     IPI_CPU_BACKTRACE,
178     /*
179     * SGI8-15 can be reserved by secure firmware, and thus may
180     @@ -803,7 +807,7 @@ core_initcall(register_cpufreq_notifier);
181    
182     static void raise_nmi(cpumask_t *mask)
183     {
184     - smp_cross_call(mask, IPI_CPU_BACKTRACE);
185     + __smp_cross_call(mask, IPI_CPU_BACKTRACE);
186     }
187    
188     void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
189     diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
190     index b1fe53e8b460..088c34e99b02 100644
191     --- a/arch/arm/mach-exynos/suspend.c
192     +++ b/arch/arm/mach-exynos/suspend.c
193     @@ -434,8 +434,27 @@ early_wakeup:
194    
195     static void exynos5420_prepare_pm_resume(void)
196     {
197     + unsigned int mpidr, cluster;
198     +
199     + mpidr = read_cpuid_mpidr();
200     + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
201     +
202     if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
203     WARN_ON(mcpm_cpu_powered_up());
204     +
205     + if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
206     + /*
207     + * When system is resumed on the LITTLE/KFC core (cluster 1),
208     + * the DSCR is not properly updated until the power is turned
209     + * on also for the cluster 0. Enable it for a while to
210     + * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
211     + * block and avoid undefined instruction issue on CP14 reset.
212     + */
213     + pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
214     + EXYNOS_COMMON_CONFIGURATION(0));
215     + pmu_raw_writel(0,
216     + EXYNOS_COMMON_CONFIGURATION(0));
217     + }
218     }
219    
220     static void exynos5420_pm_resume(void)
221     diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
222     index f4971e4a86b2..ca7026958d42 100644
223     --- a/arch/arm/mach-omap2/pm33xx-core.c
224     +++ b/arch/arm/mach-omap2/pm33xx-core.c
225     @@ -51,10 +51,12 @@ static int amx3_common_init(void)
226    
227     /* CEFUSE domain can be turned off post bootup */
228     cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
229     - if (cefuse_pwrdm)
230     - omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
231     - else
232     + if (!cefuse_pwrdm)
233     pr_err("PM: Failed to get cefuse_pwrdm\n");
234     + else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
235     + pr_info("PM: Leaving EFUSE power domain active\n");
236     + else
237     + omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
238    
239     return 0;
240     }
241     diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
242     index 89950b7bf536..bdaf3536241a 100644
243     --- a/arch/mips/kernel/prom.c
244     +++ b/arch/mips/kernel/prom.c
245     @@ -41,7 +41,19 @@ char *mips_get_machine_name(void)
246     #ifdef CONFIG_USE_OF
247     void __init early_init_dt_add_memory_arch(u64 base, u64 size)
248     {
249     - return add_memory_region(base, size, BOOT_MEM_RAM);
250     + if (base >= PHYS_ADDR_MAX) {
251     + pr_warn("Trying to add an invalid memory region, skipped\n");
252     + return;
253     + }
254     +
255     + /* Truncate the passed memory region instead of type casting */
256     + if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
257     + pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
258     + size, base, PHYS_ADDR_MAX - base);
259     + size = PHYS_ADDR_MAX - base;
260     + }
261     +
262     + add_memory_region(base, size, BOOT_MEM_RAM);
263     }
264    
265     int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
266     diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
267     index 052de4c8acb2..0c572a48158e 100644
268     --- a/arch/um/kernel/time.c
269     +++ b/arch/um/kernel/time.c
270     @@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
271     static struct clock_event_device timer_clockevent = {
272     .name = "posix-timer",
273     .rating = 250,
274     - .cpumask = cpu_all_mask,
275     + .cpumask = cpu_possible_mask,
276     .features = CLOCK_EVT_FEAT_PERIODIC |
277     CLOCK_EVT_FEAT_ONESHOT,
278     .set_state_shutdown = itimer_shutdown,
279     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
280     index 09c53bcbd497..c8b0bf2b0d5e 100644
281     --- a/arch/x86/events/intel/core.c
282     +++ b/arch/x86/events/intel/core.c
283     @@ -3072,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
284     return ret;
285    
286     if (event->attr.precise_ip) {
287     - if (!(event->attr.freq || event->attr.wakeup_events)) {
288     + if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
289     event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
290     if (!(event->attr.sample_type &
291     ~intel_pmu_large_pebs_flags(event)))
292     diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
293     index 52e55108404e..d3a73f9335e1 100644
294     --- a/arch/x86/pci/irq.c
295     +++ b/arch/x86/pci/irq.c
296     @@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
297    
298     void __init pcibios_irq_init(void)
299     {
300     + struct irq_routing_table *rtable = NULL;
301     +
302     DBG(KERN_DEBUG "PCI: IRQ init\n");
303    
304     if (raw_pci_ops == NULL)
305     @@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
306     pirq_table = pirq_find_routing_table();
307    
308     #ifdef CONFIG_PCI_BIOS
309     - if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
310     + if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
311     pirq_table = pcibios_get_irq_routing_table();
312     + rtable = pirq_table;
313     + }
314     #endif
315     if (pirq_table) {
316     pirq_peer_trick();
317     @@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
318     * If we're using the I/O APIC, avoid using the PCI IRQ
319     * routing table
320     */
321     - if (io_apic_assign_pci_irqs)
322     + if (io_apic_assign_pci_irqs) {
323     + kfree(rtable);
324     pirq_table = NULL;
325     + }
326     }
327    
328     x86_init.pci.fixup_irqs();
329     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
330     index 15e8c9955b79..6bb397995610 100644
331     --- a/block/bfq-iosched.c
332     +++ b/block/bfq-iosched.c
333     @@ -2509,6 +2509,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
334     if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
335     bfq_symmetric_scenario(bfqd))
336     sl = min_t(u64, sl, BFQ_MIN_TT);
337     + else if (bfqq->wr_coeff > 1)
338     + sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
339    
340     bfqd->last_idling_start = ktime_get();
341     hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
342     diff --git a/block/blk-core.c b/block/blk-core.c
343     index 33488b1426b7..6eed5d84c2ef 100644
344     --- a/block/blk-core.c
345     +++ b/block/blk-core.c
346     @@ -411,7 +411,6 @@ void blk_sync_queue(struct request_queue *q)
347     struct blk_mq_hw_ctx *hctx;
348     int i;
349    
350     - cancel_delayed_work_sync(&q->requeue_work);
351     queue_for_each_hw_ctx(q, hctx, i)
352     cancel_delayed_work_sync(&hctx->run_work);
353     } else {
354     diff --git a/block/blk-mq.c b/block/blk-mq.c
355     index 4e563ee462cb..70d839b9c3b0 100644
356     --- a/block/blk-mq.c
357     +++ b/block/blk-mq.c
358     @@ -2465,6 +2465,8 @@ void blk_mq_release(struct request_queue *q)
359     struct blk_mq_hw_ctx *hctx;
360     unsigned int i;
361    
362     + cancel_delayed_work_sync(&q->requeue_work);
363     +
364     /* hctx kobj stays in hctx */
365     queue_for_each_hw_ctx(q, hctx, i) {
366     if (!hctx)
367     diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
368     index 64191694ff6e..9cfdbea493bb 100644
369     --- a/drivers/clk/rockchip/clk-rk3288.c
370     +++ b/drivers/clk/rockchip/clk-rk3288.c
371     @@ -835,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
372     RK3288_CLKSEL_CON(10),
373     RK3288_CLKSEL_CON(33),
374     RK3288_CLKSEL_CON(37),
375     +
376     + /* We turn aclk_dmac1 on for suspend; this will restore it */
377     + RK3288_CLKGATE_CON(10),
378     };
379    
380     static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
381     @@ -850,6 +853,14 @@ static int rk3288_clk_suspend(void)
382     readl_relaxed(rk3288_cru_base + reg_id);
383     }
384    
385     + /*
386     + * Going into deep sleep (specifically setting PMU_CLR_DMA in
387     + * RK3288_PMU_PWRMODE_CON1) appears to fail unless
388     + * "aclk_dmac1" is on.
389     + */
390     + writel_relaxed(1 << (12 + 16),
391     + rk3288_cru_base + RK3288_CLKGATE_CON(10));
392     +
393     /*
394     * Switch PLLs other than DPLL (for SDRAM) to slow mode to
395     * avoid crashes on resume. The Mask ROM on the system will
396     diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
397     index 1fbf9cb9b742..89c5e5b46068 100644
398     --- a/drivers/dma/idma64.c
399     +++ b/drivers/dma/idma64.c
400     @@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip)
401     idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
402     idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
403    
404     - idma64->dma.dev = chip->dev;
405     + idma64->dma.dev = chip->sysdev;
406    
407     dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
408    
409     @@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
410     {
411     struct idma64_chip *chip;
412     struct device *dev = &pdev->dev;
413     + struct device *sysdev = dev->parent;
414     struct resource *mem;
415     int ret;
416    
417     @@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
418     if (IS_ERR(chip->regs))
419     return PTR_ERR(chip->regs);
420    
421     - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
422     + ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
423     if (ret)
424     return ret;
425    
426     chip->dev = dev;
427     + chip->sysdev = sysdev;
428    
429     ret = idma64_probe(chip);
430     if (ret)
431     diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
432     index 6b816878e5e7..baa32e1425de 100644
433     --- a/drivers/dma/idma64.h
434     +++ b/drivers/dma/idma64.h
435     @@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
436     /**
437     * struct idma64_chip - representation of iDMA 64-bit controller hardware
438     * @dev: struct device of the DMA controller
439     + * @sysdev: struct device of the physical device that does DMA
440     * @irq: irq line
441     * @regs: memory mapped I/O space
442     * @idma64: struct idma64 that is filed by idma64_probe()
443     */
444     struct idma64_chip {
445     struct device *dev;
446     + struct device *sysdev;
447     int irq;
448     void __iomem *regs;
449     struct idma64 *idma64;
450     diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
451     index 57304b2e989f..b00cc03ad6b6 100644
452     --- a/drivers/edac/Kconfig
453     +++ b/drivers/edac/Kconfig
454     @@ -250,8 +250,8 @@ config EDAC_PND2
455     micro-server but may appear on others in the future.
456    
457     config EDAC_MPC85XX
458     - tristate "Freescale MPC83xx / MPC85xx"
459     - depends on FSL_SOC
460     + bool "Freescale MPC83xx / MPC85xx"
461     + depends on FSL_SOC && EDAC=y
462     help
463     Support for error detection and correction on the Freescale
464     MPC8349, MPC8560, MPC8540, MPC8548, T4240
465     diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
466     index 6c1acf642c8e..6fa430d98517 100644
467     --- a/drivers/gpio/gpio-omap.c
468     +++ b/drivers/gpio/gpio-omap.c
469     @@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
470     }
471     }
472    
473     +/*
474     + * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
475     + * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs
476     + * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none
477     + * are capable waking up the system from off mode.
478     + */
479     +static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
480     +{
481     + u32 no_wake = bank->non_wakeup_gpios;
482     +
483     + if (no_wake)
484     + return !!(~no_wake & gpio_mask);
485     +
486     + return false;
487     +}
488     +
489     static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
490     unsigned trigger)
491     {
492     @@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
493     }
494    
495     /* This part needs to be executed always for OMAP{34xx, 44xx} */
496     - if (!bank->regs->irqctrl) {
497     - /* On omap24xx proceed only when valid GPIO bit is set */
498     - if (bank->non_wakeup_gpios) {
499     - if (!(bank->non_wakeup_gpios & gpio_bit))
500     - goto exit;
501     - }
502     -
503     + if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
504     /*
505     * Log the edge gpio and manually trigger the IRQ
506     * after resume if the input level changes
507     @@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
508     bank->enabled_non_wakeup_gpios &= ~gpio_bit;
509     }
510    
511     -exit:
512     bank->level_mask =
513     readl_relaxed(bank->base + bank->regs->leveldetect0) |
514     readl_relaxed(bank->base + bank->regs->leveldetect1);
515     diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
516     index 7e09ce75ffb2..a9cb5571de54 100644
517     --- a/drivers/gpio/gpio-vf610.c
518     +++ b/drivers/gpio/gpio-vf610.c
519     @@ -37,6 +37,7 @@ struct fsl_gpio_soc_data {
520    
521     struct vf610_gpio_port {
522     struct gpio_chip gc;
523     + struct irq_chip ic;
524     void __iomem *base;
525     void __iomem *gpio_base;
526     const struct fsl_gpio_soc_data *sdata;
527     @@ -66,8 +67,6 @@ struct vf610_gpio_port {
528     #define PORT_INT_EITHER_EDGE 0xb
529     #define PORT_INT_LOGIC_ONE 0xc
530    
531     -static struct irq_chip vf610_gpio_irq_chip;
532     -
533     static const struct fsl_gpio_soc_data imx_data = {
534     .have_paddr = true,
535     };
536     @@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
537     return 0;
538     }
539    
540     -static struct irq_chip vf610_gpio_irq_chip = {
541     - .name = "gpio-vf610",
542     - .irq_ack = vf610_gpio_irq_ack,
543     - .irq_mask = vf610_gpio_irq_mask,
544     - .irq_unmask = vf610_gpio_irq_unmask,
545     - .irq_set_type = vf610_gpio_irq_set_type,
546     - .irq_set_wake = vf610_gpio_irq_set_wake,
547     -};
548     -
549     static int vf610_gpio_probe(struct platform_device *pdev)
550     {
551     struct device *dev = &pdev->dev;
552     @@ -259,6 +249,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
553     struct vf610_gpio_port *port;
554     struct resource *iores;
555     struct gpio_chip *gc;
556     + struct irq_chip *ic;
557     int i;
558     int ret;
559    
560     @@ -295,6 +286,14 @@ static int vf610_gpio_probe(struct platform_device *pdev)
561     gc->direction_output = vf610_gpio_direction_output;
562     gc->set = vf610_gpio_set;
563    
564     + ic = &port->ic;
565     + ic->name = "gpio-vf610";
566     + ic->irq_ack = vf610_gpio_irq_ack;
567     + ic->irq_mask = vf610_gpio_irq_mask;
568     + ic->irq_unmask = vf610_gpio_irq_unmask;
569     + ic->irq_set_type = vf610_gpio_irq_set_type;
570     + ic->irq_set_wake = vf610_gpio_irq_set_wake;
571     +
572     ret = gpiochip_add_data(gc, port);
573     if (ret < 0)
574     return ret;
575     @@ -306,14 +305,13 @@ static int vf610_gpio_probe(struct platform_device *pdev)
576     /* Clear the interrupt status register for all GPIO's */
577     vf610_gpio_writel(~0, port->base + PORT_ISFR);
578    
579     - ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
580     - handle_edge_irq, IRQ_TYPE_NONE);
581     + ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
582     if (ret) {
583     dev_err(dev, "failed to add irqchip\n");
584     gpiochip_remove(gc);
585     return ret;
586     }
587     - gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
588     + gpiochip_set_chained_irqchip(gc, ic, port->irq,
589     vf610_gpio_irq_handler);
590    
591     return 0;
592     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
593     index bf8b68f8db4f..bce5741f2952 100644
594     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
595     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
596     @@ -388,6 +388,10 @@ void dpp1_cnv_setup (
597     default:
598     break;
599     }
600     +
601     + /* Set default color space based on format if none is given. */
602     + color_space = input_color_space ? input_color_space : color_space;
603     +
604     REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
605     CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
606     REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
607     @@ -399,7 +403,7 @@ void dpp1_cnv_setup (
608     for (i = 0; i < 12; i++)
609     tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
610    
611     - tbl_entry.color_space = input_color_space;
612     + tbl_entry.color_space = color_space;
613    
614     if (color_space >= COLOR_SPACE_YCBCR601)
615     select = INPUT_CSC_SELECT_ICSC;
616     diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
617     index a0355709abd1..7736ef123e9b 100644
618     --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
619     +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
620     @@ -1890,7 +1890,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
621     plane_state->format,
622     EXPANSION_MODE_ZERO,
623     plane_state->input_csc_color_matrix,
624     - COLOR_SPACE_YCBCR601_LIMITED);
625     + plane_state->color_space);
626    
627     //set scale and bias registers
628     build_prescale_params(&bns_params, plane_state);
629     diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
630     index 85c2d407a52e..e7ddd3e3db92 100644
631     --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
632     +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
633     @@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
634     vsync_polarity = 1;
635     }
636    
637     - if (mode->vrefresh <= 24000)
638     + if (drm_mode_vrefresh(mode) <= 24)
639     low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
640     - else if (mode->vrefresh <= 25000)
641     + else if (drm_mode_vrefresh(mode) <= 25)
642     low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
643     - else if (mode->vrefresh <= 30000)
644     + else if (drm_mode_vrefresh(mode) <= 30)
645     low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
646     else
647     low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
648     diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
649     index 00d9d77f583a..4b75ad40dd80 100644
650     --- a/drivers/gpu/drm/nouveau/Kconfig
651     +++ b/drivers/gpu/drm/nouveau/Kconfig
652     @@ -16,20 +16,9 @@ config DRM_NOUVEAU
653     select INPUT if ACPI && X86
654     select THERMAL if ACPI && X86
655     select ACPI_VIDEO if ACPI && X86
656     - help
657     - Choose this option for open-source NVIDIA support.
658     -
659     -config NOUVEAU_LEGACY_CTX_SUPPORT
660     - bool "Nouveau legacy context support"
661     - depends on DRM_NOUVEAU
662     select DRM_VM
663     - default y
664     help
665     - There was a version of the nouveau DDX that relied on legacy
666     - ctx ioctls not erroring out. But that was back in time a long
667     - ways, so offer a way to disable it now. For uapi compat with
668     - old nouveau ddx this should be on by default, but modern distros
669     - should consider turning it off.
670     + Choose this option for open-source NVIDIA support.
671    
672     config NOUVEAU_PLATFORM_DRIVER
673     bool "Nouveau (NVIDIA) SoC GPUs"
674     diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
675     index e48c5eb35b49..66c125a6b0b3 100644
676     --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
677     +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
678     @@ -41,6 +41,7 @@ struct nv50_disp_interlock {
679     NV50_DISP_INTERLOCK__SIZE
680     } type;
681     u32 data;
682     + u32 wimm;
683     };
684    
685     void corec37d_ntfy_init(struct nouveau_bo *, u32);
686     diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
687     index 4f57e5379796..d81a99bb2ac3 100644
688     --- a/drivers/gpu/drm/nouveau/dispnv50/head.c
689     +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
690     @@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
691     asyh->set.or = head->func->or != NULL;
692     }
693    
694     - if (asyh->state.mode_changed)
695     + if (asyh->state.mode_changed || asyh->state.connectors_changed)
696     nv50_head_atomic_check_mode(head, asyh);
697    
698     if (asyh->state.color_mgmt_changed ||
699     diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
700     index 9103b8494279..f7dbd965e4e7 100644
701     --- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
702     +++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
703     @@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
704     return ret;
705     }
706    
707     + wndw->interlock.wimm = wndw->interlock.data;
708     wndw->immd = func;
709     return 0;
710     }
711     diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
712     index 2187922e8dc2..b3db4553098d 100644
713     --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
714     +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
715     @@ -151,7 +151,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
716     if (asyw->set.point) {
717     if (asyw->set.point = false, asyw->set.mask)
718     interlock[wndw->interlock.type] |= wndw->interlock.data;
719     - interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
720     + interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
721    
722     wndw->immd->point(wndw, asyw);
723     wndw->immd->update(wndw, interlock);
724     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
725     index 2b7a54cc3c9e..74d2283f2c28 100644
726     --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
727     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
728     @@ -1015,11 +1015,8 @@ nouveau_driver_fops = {
729     static struct drm_driver
730     driver_stub = {
731     .driver_features =
732     - DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
733     -#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
734     - | DRIVER_KMS_LEGACY_CONTEXT
735     -#endif
736     - ,
737     + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
738     + DRIVER_KMS_LEGACY_CONTEXT,
739    
740     .load = nouveau_drm_load,
741     .unload = nouveau_drm_unload,
742     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
743     index 5f301e632599..818d21bd28d3 100644
744     --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
745     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
746     @@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
747     * and it's better to have a failed modeset than that.
748     */
749     for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
750     - if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
751     - failsafe = cfg;
752     + if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
753     + /* Try to respect sink limits too when selecting
754     + * lowest link configuration.
755     + */
756     + if (!failsafe ||
757     + (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
758     + failsafe = cfg;
759     + }
760     +
761     if (failsafe && cfg[1].rate < dataKBps)
762     break;
763     }
764     diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
765     index 754f6b25f265..6d9f78612dee 100644
766     --- a/drivers/gpu/drm/pl111/pl111_display.c
767     +++ b/drivers/gpu/drm/pl111/pl111_display.c
768     @@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
769     dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
770     return PTR_ERR(parent);
771     }
772     +
773     + spin_lock_init(&priv->tim2_lock);
774     +
775     /* If the clock divider is broken, use the parent directly */
776     if (priv->variant->broken_clockdivider) {
777     priv->clk = parent;
778     return 0;
779     }
780     parent_name = __clk_get_name(parent);
781     -
782     - spin_lock_init(&priv->tim2_lock);
783     div->init = &init;
784    
785     ret = devm_clk_hw_register(drm->dev, div);
786     diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
787     index ab39315c9078..39e608271263 100644
788     --- a/drivers/gpu/drm/vc4/vc4_plane.c
789     +++ b/drivers/gpu/drm/vc4/vc4_plane.c
790     @@ -818,6 +818,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
791     drm_atomic_set_fb_for_plane(plane->state, state->fb);
792     }
793    
794     + swap(plane->state->fb, state->fb);
795     /* Set the cursor's position on the screen. This is the
796     * expected change from the drm_mode_cursor_universal()
797     * helper.
798     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
799     index 9ae3678844eb..40fbf20d69e5 100644
800     --- a/drivers/iommu/arm-smmu-v3.c
801     +++ b/drivers/iommu/arm-smmu-v3.c
802     @@ -2414,13 +2414,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
803     /* Clear CR0 and sync (disables SMMU and queue processing) */
804     reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
805     if (reg & CR0_SMMUEN) {
806     - if (is_kdump_kernel()) {
807     - arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
808     - arm_smmu_device_disable(smmu);
809     - return -EBUSY;
810     - }
811     -
812     dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
813     + WARN_ON(is_kdump_kernel() && !disable_bypass);
814     + arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
815     }
816    
817     ret = arm_smmu_device_disable(smmu);
818     @@ -2513,6 +2509,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
819     return ret;
820     }
821    
822     + if (is_kdump_kernel())
823     + enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
824    
825     /* Enable the SMMU interface, or ensure bypass */
826     if (!bypass || disable_bypass) {
827     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
828     index 603bf5233a99..c1439019dd12 100644
829     --- a/drivers/iommu/intel-iommu.c
830     +++ b/drivers/iommu/intel-iommu.c
831     @@ -4033,9 +4033,7 @@ static void __init init_no_remapping_devices(void)
832    
833     /* This IOMMU has *only* gfx devices. Either bypass it or
834     set the gfx_mapped flag, as appropriate */
835     - if (dmar_map_gfx) {
836     - intel_iommu_gfx_mapped = 1;
837     - } else {
838     + if (!dmar_map_gfx) {
839     drhd->ignored = 1;
840     for_each_active_dev_scope(drhd->devices,
841     drhd->devices_cnt, i, dev)
842     @@ -4831,6 +4829,9 @@ int __init intel_iommu_init(void)
843     goto out_free_reserved_range;
844     }
845    
846     + if (dmar_map_gfx)
847     + intel_iommu_gfx_mapped = 1;
848     +
849     init_no_remapping_devices();
850    
851     ret = init_dmars();
852     diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
853     index 533b0da5235d..ca1f993c0de3 100644
854     --- a/drivers/mailbox/stm32-ipcc.c
855     +++ b/drivers/mailbox/stm32-ipcc.c
856     @@ -8,9 +8,9 @@
857     #include <linux/bitfield.h>
858     #include <linux/clk.h>
859     #include <linux/interrupt.h>
860     +#include <linux/io.h>
861     #include <linux/mailbox_controller.h>
862     #include <linux/module.h>
863     -#include <linux/of_irq.h>
864     #include <linux/platform_device.h>
865     #include <linux/pm_wakeirq.h>
866    
867     @@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
868    
869     /* irq */
870     for (i = 0; i < IPCC_IRQ_NUM; i++) {
871     - ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
872     + ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
873     if (ipcc->irqs[i] < 0) {
874     - dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
875     + if (ipcc->irqs[i] != -EPROBE_DEFER)
876     + dev_err(dev, "no IRQ specified %s\n",
877     + irq_name[i]);
878     ret = ipcc->irqs[i];
879     goto err_clk;
880     }
881     @@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
882    
883     /* wakeup */
884     if (of_property_read_bool(np, "wakeup-source")) {
885     - ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
886     + ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
887     if (ipcc->wkp < 0) {
888     - dev_err(dev, "could not get wakeup IRQ\n");
889     + if (ipcc->wkp != -EPROBE_DEFER)
890     + dev_err(dev, "could not get wakeup IRQ\n");
891     ret = ipcc->wkp;
892     goto err_clk;
893     }
894     diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
895     index 50bffc3382d7..ff3fba16e735 100644
896     --- a/drivers/mfd/intel-lpss.c
897     +++ b/drivers/mfd/intel-lpss.c
898     @@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
899     {
900     u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
901    
902     + /* Set the device in reset state */
903     + writel(0, lpss->priv + LPSS_PRIV_RESETS);
904     +
905     intel_lpss_deassert_reset(lpss);
906    
907     intel_lpss_set_remap_addr(lpss);
908     diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
909     index 3bd75061f777..f78be039e463 100644
910     --- a/drivers/mfd/tps65912-spi.c
911     +++ b/drivers/mfd/tps65912-spi.c
912     @@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
913     { .compatible = "ti,tps65912", },
914     { /* sentinel */ }
915     };
916     +MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
917    
918     static int tps65912_spi_probe(struct spi_device *spi)
919     {
920     diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
921     index dd19f17a1b63..2b8c479dbfa6 100644
922     --- a/drivers/mfd/twl6040.c
923     +++ b/drivers/mfd/twl6040.c
924     @@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
925     }
926     }
927    
928     + /*
929     + * Register access can produce errors after power-up unless we
930     + * wait at least 8ms based on measurements on duovero.
931     + */
932     + usleep_range(10000, 12000);
933     +
934     /* Sync with the HW */
935     - regcache_sync(twl6040->regmap);
936     + ret = regcache_sync(twl6040->regmap);
937     + if (ret) {
938     + dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
939     + ret);
940     + goto out;
941     + }
942    
943     /* Default PLL configuration after power up */
944     twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
945     diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
946     index 896e2df9400f..fd33a3b9c66f 100644
947     --- a/drivers/misc/pci_endpoint_test.c
948     +++ b/drivers/misc/pci_endpoint_test.c
949     @@ -662,6 +662,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
950     data = (struct pci_endpoint_test_data *)ent->driver_data;
951     if (data) {
952     test_reg_bar = data->test_reg_bar;
953     + test->test_reg_bar = test_reg_bar;
954     test->alignment = data->alignment;
955     irq_type = data->irq_type;
956     }
957     diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
958     index 1841d250e9e2..eb1a65cb878f 100644
959     --- a/drivers/mmc/host/mmci.c
960     +++ b/drivers/mmc/host/mmci.c
961     @@ -1295,9 +1295,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
962     }
963    
964     /*
965     - * Don't poll for busy completion in irq context.
966     + * Busy detection has been handled by mmci_cmd_irq() above.
967     + * Clear the status bit to prevent polling in IRQ context.
968     */
969     - if (host->variant->busy_detect && host->busy_status)
970     + if (host->variant->busy_detect_flag)
971     status &= ~host->variant->busy_detect_flag;
972    
973     ret = 1;
974     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
975     index 340baf6a470c..4648c6a9d9e8 100644
976     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
977     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
978     @@ -4300,8 +4300,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
979     return hclge_add_mac_vlan_tbl(vport, &req, NULL);
980    
981     /* check if we just hit the duplicate */
982     - if (!ret)
983     - ret = -EINVAL;
984     + if (!ret) {
985     + dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
986     + vport->vport_id, addr);
987     + return 0;
988     + }
989    
990     dev_err(&hdev->pdev->dev,
991     "PF failed to add unicast entry(%pM) in the MAC table\n",
992     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
993     index df8808cd7e11..4e04985fb430 100644
994     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
995     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
996     @@ -6758,10 +6758,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
997     struct i40e_pf *pf = vsi->back;
998     u8 enabled_tc = 0, num_tc, hw;
999     bool need_reset = false;
1000     + int old_queue_pairs;
1001     int ret = -EINVAL;
1002     u16 mode;
1003     int i;
1004    
1005     + old_queue_pairs = vsi->num_queue_pairs;
1006     num_tc = mqprio_qopt->qopt.num_tc;
1007     hw = mqprio_qopt->qopt.hw;
1008     mode = mqprio_qopt->mode;
1009     @@ -6862,6 +6864,7 @@ config_tc:
1010     }
1011     ret = i40e_configure_queue_channels(vsi);
1012     if (ret) {
1013     + vsi->num_queue_pairs = old_queue_pairs;
1014     netdev_info(netdev,
1015     "Failed configuring queue channels\n");
1016     need_reset = true;
1017     diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1018     index db1543bca701..875f97aba6e0 100644
1019     --- a/drivers/net/ethernet/intel/ice/ice_main.c
1020     +++ b/drivers/net/ethernet/intel/ice/ice_main.c
1021     @@ -652,6 +652,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
1022     case ICE_FC_RX_PAUSE:
1023     fc = "RX";
1024     break;
1025     + case ICE_FC_NONE:
1026     + fc = "None";
1027     + break;
1028     default:
1029     fc = "Unknown";
1030     break;
1031     diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
1032     index e0d6760f3219..4b5af2413970 100644
1033     --- a/drivers/net/thunderbolt.c
1034     +++ b/drivers/net/thunderbolt.c
1035     @@ -1285,6 +1285,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
1036     tbnet_tear_down(net, true);
1037     }
1038    
1039     + tb_unregister_protocol_handler(&net->handler);
1040     return 0;
1041     }
1042    
1043     @@ -1293,6 +1294,8 @@ static int __maybe_unused tbnet_resume(struct device *dev)
1044     struct tb_service *svc = tb_to_service(dev);
1045     struct tbnet *net = tb_service_get_drvdata(svc);
1046    
1047     + tb_register_protocol_handler(&net->handler);
1048     +
1049     netif_carrier_off(net->dev);
1050     if (netif_running(net->dev)) {
1051     netif_device_attach(net->dev);
1052     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1053     index 7b9ef8e734e7..c8eeecc58115 100644
1054     --- a/drivers/nvme/host/pci.c
1055     +++ b/drivers/nvme/host/pci.c
1056     @@ -1132,6 +1132,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1057     struct nvme_dev *dev = nvmeq->dev;
1058     struct request *abort_req;
1059     struct nvme_command cmd;
1060     + bool shutdown = false;
1061     u32 csts = readl(dev->bar + NVME_REG_CSTS);
1062    
1063     /* If PCI error recovery process is happening, we cannot reset or
1064     @@ -1168,12 +1169,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1065     * shutdown, so we return BLK_EH_DONE.
1066     */
1067     switch (dev->ctrl.state) {
1068     + case NVME_CTRL_DELETING:
1069     + shutdown = true;
1070     case NVME_CTRL_CONNECTING:
1071     case NVME_CTRL_RESETTING:
1072     dev_warn_ratelimited(dev->ctrl.device,
1073     "I/O %d QID %d timeout, disable controller\n",
1074     req->tag, nvmeq->qid);
1075     - nvme_dev_disable(dev, false);
1076     + nvme_dev_disable(dev, shutdown);
1077     nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1078     return BLK_EH_DONE;
1079     default:
1080     @@ -2187,8 +2190,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1081     * must flush all entered requests to their failed completion to avoid
1082     * deadlocking blk-mq hot-cpu notifier.
1083     */
1084     - if (shutdown)
1085     + if (shutdown) {
1086     nvme_start_queues(&dev->ctrl);
1087     + if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
1088     + blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1089     + }
1090     mutex_unlock(&dev->shutdown_lock);
1091     }
1092    
1093     diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
1094     index 7c530c88b3fb..99de51e87f7f 100644
1095     --- a/drivers/nvmem/core.c
1096     +++ b/drivers/nvmem/core.c
1097     @@ -1028,7 +1028,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
1098     static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
1099     {
1100     u8 *p, *b;
1101     - int i, bit_offset = cell->bit_offset;
1102     + int i, extra, bit_offset = cell->bit_offset;
1103    
1104     p = b = buf;
1105     if (bit_offset) {
1106     @@ -1043,11 +1043,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
1107     p = b;
1108     *b++ >>= bit_offset;
1109     }
1110     -
1111     - /* result fits in less bytes */
1112     - if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
1113     - *p-- = 0;
1114     + } else {
1115     + /* point to the msb */
1116     + p += cell->bytes - 1;
1117     }
1118     +
1119     + /* result fits in less bytes */
1120     + extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1121     + while (--extra >= 0)
1122     + *p-- = 0;
1123     +
1124     /* clear msb bits if any leftover in the last byte */
1125     *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
1126     }
1127     diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
1128     index d020f89248fd..69f8e972e29c 100644
1129     --- a/drivers/nvmem/sunxi_sid.c
1130     +++ b/drivers/nvmem/sunxi_sid.c
1131     @@ -235,8 +235,10 @@ static const struct sunxi_sid_cfg sun50i_a64_cfg = {
1132     static const struct of_device_id sunxi_sid_of_match[] = {
1133     { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
1134     { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
1135     + { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
1136     { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
1137     { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
1138     + { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
1139     {/* sentinel */},
1140     };
1141     MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
1142     diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
1143     index e88bd221fffe..5e199e7d2d4f 100644
1144     --- a/drivers/pci/controller/dwc/pci-keystone.c
1145     +++ b/drivers/pci/controller/dwc/pci-keystone.c
1146     @@ -237,6 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
1147     ks_dw_pcie_enable_error_irq(ks_pcie);
1148     }
1149    
1150     +#ifdef CONFIG_ARM
1151     /*
1152     * When a PCI device does not exist during config cycles, keystone host gets a
1153     * bus error instead of returning 0xffffffff. This handler always returns 0
1154     @@ -256,6 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
1155    
1156     return 0;
1157     }
1158     +#endif
1159    
1160     static int __init ks_pcie_host_init(struct pcie_port *pp)
1161     {
1162     @@ -279,12 +281,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
1163     val |= BIT(12);
1164     writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
1165    
1166     +#ifdef CONFIG_ARM
1167     /*
1168     * PCIe access errors that result into OCP errors are caught by ARM as
1169     * "External aborts"
1170     */
1171     hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
1172     "Asynchronous external abort");
1173     +#endif
1174    
1175     return 0;
1176     }
1177     diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
1178     index de8635af4cde..739d97080d3b 100644
1179     --- a/drivers/pci/controller/dwc/pcie-designware-ep.c
1180     +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
1181     @@ -385,6 +385,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1182     {
1183     struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1184     struct pci_epc *epc = ep->epc;
1185     + unsigned int aligned_offset;
1186     u16 msg_ctrl, msg_data;
1187     u32 msg_addr_lower, msg_addr_upper, reg;
1188     u64 msg_addr;
1189     @@ -410,13 +411,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
1190     reg = ep->msi_cap + PCI_MSI_DATA_32;
1191     msg_data = dw_pcie_readw_dbi(pci, reg);
1192     }
1193     - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
1194     + aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
1195     + msg_addr = ((u64)msg_addr_upper) << 32 |
1196     + (msg_addr_lower & ~aligned_offset);
1197     ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
1198     epc->mem->page_size);
1199     if (ret)
1200     return ret;
1201    
1202     - writel(msg_data | (interrupt_num - 1), ep->msi_mem);
1203     + writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
1204    
1205     dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
1206    
1207     diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
1208     index b56e22262a77..acd50920c2ff 100644
1209     --- a/drivers/pci/controller/dwc/pcie-designware-host.c
1210     +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
1211     @@ -303,20 +303,24 @@ void dw_pcie_free_msi(struct pcie_port *pp)
1212    
1213     irq_domain_remove(pp->msi_domain);
1214     irq_domain_remove(pp->irq_domain);
1215     +
1216     + if (pp->msi_page)
1217     + __free_page(pp->msi_page);
1218     }
1219    
1220     void dw_pcie_msi_init(struct pcie_port *pp)
1221     {
1222     struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1223     struct device *dev = pci->dev;
1224     - struct page *page;
1225     u64 msi_target;
1226    
1227     - page = alloc_page(GFP_KERNEL);
1228     - pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1229     + pp->msi_page = alloc_page(GFP_KERNEL);
1230     + pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
1231     + DMA_FROM_DEVICE);
1232     if (dma_mapping_error(dev, pp->msi_data)) {
1233     dev_err(dev, "Failed to map MSI data\n");
1234     - __free_page(page);
1235     + __free_page(pp->msi_page);
1236     + pp->msi_page = NULL;
1237     return;
1238     }
1239     msi_target = (u64)pp->msi_data;
1240     @@ -439,7 +443,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
1241     if (ret)
1242     pci->num_viewport = 2;
1243    
1244     - if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
1245     + if (pci_msi_enabled()) {
1246     /*
1247     * If a specific SoC driver needs to change the
1248     * default number of vectors, it needs to implement
1249     @@ -477,7 +481,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
1250     if (pp->ops->host_init) {
1251     ret = pp->ops->host_init(pp);
1252     if (ret)
1253     - goto error;
1254     + goto err_free_msi;
1255     }
1256    
1257     pp->root_bus_nr = pp->busn->start;
1258     @@ -491,7 +495,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
1259    
1260     ret = pci_scan_root_bus_bridge(bridge);
1261     if (ret)
1262     - goto error;
1263     + goto err_free_msi;
1264    
1265     bus = bridge->bus;
1266    
1267     @@ -507,6 +511,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
1268     pci_bus_add_devices(bus);
1269     return 0;
1270    
1271     +err_free_msi:
1272     + if (pci_msi_enabled() && !pp->ops->msi_host_init)
1273     + dw_pcie_free_msi(pp);
1274     error:
1275     pci_free_host_bridge(bridge);
1276     return ret;
1277     diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
1278     index 9f1a5e399b70..14dcf6646699 100644
1279     --- a/drivers/pci/controller/dwc/pcie-designware.h
1280     +++ b/drivers/pci/controller/dwc/pcie-designware.h
1281     @@ -164,6 +164,7 @@ struct pcie_port {
1282     struct irq_domain *irq_domain;
1283     struct irq_domain *msi_domain;
1284     dma_addr_t msi_data;
1285     + struct page *msi_page;
1286     u32 num_vectors;
1287     u32 irq_status[MAX_MSI_CTRLS];
1288     raw_spinlock_t lock;
1289     diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
1290     index 6a4e435bd35f..9b9c677ad3a0 100644
1291     --- a/drivers/pci/controller/pcie-rcar.c
1292     +++ b/drivers/pci/controller/pcie-rcar.c
1293     @@ -892,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
1294     {
1295     struct device *dev = pcie->dev;
1296     struct rcar_msi *msi = &pcie->msi;
1297     - unsigned long base;
1298     + phys_addr_t base;
1299     int err, i;
1300    
1301     mutex_init(&msi->lock);
1302     @@ -931,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
1303    
1304     /* setup MSI data target */
1305     msi->pages = __get_free_pages(GFP_KERNEL, 0);
1306     + if (!msi->pages) {
1307     + err = -ENOMEM;
1308     + goto err;
1309     + }
1310     base = virt_to_phys((void *)msi->pages);
1311    
1312     - rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
1313     - rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
1314     + rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
1315     + rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
1316    
1317     /* enable all MSI interrupts */
1318     rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
1319     diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
1320     index 7b1389d8e2a5..ea48cba5480b 100644
1321     --- a/drivers/pci/controller/pcie-xilinx.c
1322     +++ b/drivers/pci/controller/pcie-xilinx.c
1323     @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = {
1324     * xilinx_pcie_enable_msi - Enable MSI support
1325     * @port: PCIe port information
1326     */
1327     -static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
1328     +static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
1329     {
1330     phys_addr_t msg_addr;
1331    
1332     port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
1333     + if (!port->msi_pages)
1334     + return -ENOMEM;
1335     +
1336     msg_addr = virt_to_phys((void *)port->msi_pages);
1337     pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
1338     pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
1339     +
1340     + return 0;
1341     }
1342    
1343     /* INTx Functions */
1344     @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
1345     struct device *dev = port->dev;
1346     struct device_node *node = dev->of_node;
1347     struct device_node *pcie_intc_node;
1348     + int ret;
1349    
1350     /* Setup INTx */
1351     pcie_intc_node = of_get_next_child(node, NULL);
1352     @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
1353     return -ENODEV;
1354     }
1355    
1356     - xilinx_pcie_enable_msi(port);
1357     + ret = xilinx_pcie_enable_msi(port);
1358     + if (ret)
1359     + return ret;
1360     }
1361    
1362     return 0;
1363     diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
1364     index e2356a9c7088..182f9e3443ee 100644
1365     --- a/drivers/pci/hotplug/rpadlpar_core.c
1366     +++ b/drivers/pci/hotplug/rpadlpar_core.c
1367     @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
1368     if (rc == 0)
1369     break;
1370     }
1371     + of_node_put(parent);
1372    
1373     return dn;
1374     }
1375     @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
1376     return np;
1377     }
1378    
1379     +/* Returns a device_node with its reference count incremented */
1380     static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
1381     {
1382     struct device_node *dn;
1383     @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name)
1384     rc = dlpar_add_phb(drc_name, dn);
1385     break;
1386     }
1387     + of_node_put(dn);
1388    
1389     printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
1390     exit:
1391     @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name)
1392     rc = dlpar_remove_pci_slot(drc_name, dn);
1393     break;
1394     }
1395     + of_node_put(dn);
1396     vm_unmap_aliases();
1397    
1398     printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
1399     diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
1400     index 37d0c15c9eeb..72db2e0ebced 100644
1401     --- a/drivers/pci/switch/switchtec.c
1402     +++ b/drivers/pci/switch/switchtec.c
1403     @@ -1116,7 +1116,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1404     if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1405     return 0;
1406    
1407     - if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
1408     + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
1409     + eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
1410     return 0;
1411    
1412     dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1413     diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
1414     index e5d5b1adb5a9..ac784ac66ac3 100644
1415     --- a/drivers/platform/chrome/cros_ec_proto.c
1416     +++ b/drivers/platform/chrome/cros_ec_proto.c
1417     @@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev,
1418     else
1419     xfer_fxn = ec_dev->cmd_xfer;
1420    
1421     + if (!xfer_fxn) {
1422     + /*
1423     + * This error can happen if a communication error happened and
1424     + * the EC is trying to use protocol v2, on an underlying
1425     + * communication mechanism that does not support v2.
1426     + */
1427     + dev_err_once(ec_dev->dev,
1428     + "missing EC transfer API, cannot send command\n");
1429     + return -EIO;
1430     + }
1431     +
1432     ret = (*xfer_fxn)(ec_dev, msg);
1433     if (msg->result == EC_RES_IN_PROGRESS) {
1434     int i;
1435     diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
1436     index e7edc8c63936..4ad9d127f2f5 100644
1437     --- a/drivers/platform/x86/intel_pmc_ipc.c
1438     +++ b/drivers/platform/x86/intel_pmc_ipc.c
1439     @@ -776,13 +776,17 @@ static int ipc_create_pmc_devices(void)
1440     if (ret) {
1441     dev_err(ipcdev.dev, "Failed to add punit platform device\n");
1442     platform_device_unregister(ipcdev.tco_dev);
1443     + return ret;
1444     }
1445    
1446     if (!ipcdev.telem_res_inval) {
1447     ret = ipc_create_telemetry_device();
1448     - if (ret)
1449     + if (ret) {
1450     dev_warn(ipcdev.dev,
1451     "Failed to add telemetry platform device\n");
1452     + platform_device_unregister(ipcdev.punit_dev);
1453     + platform_device_unregister(ipcdev.tco_dev);
1454     + }
1455     }
1456    
1457     return ret;
1458     diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
1459     index b91b1d2999dc..d19307f791c6 100644
1460     --- a/drivers/power/supply/max14656_charger_detector.c
1461     +++ b/drivers/power/supply/max14656_charger_detector.c
1462     @@ -280,6 +280,13 @@ static int max14656_probe(struct i2c_client *client,
1463    
1464     INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
1465    
1466     + chip->detect_psy = devm_power_supply_register(dev,
1467     + &chip->psy_desc, &psy_cfg);
1468     + if (IS_ERR(chip->detect_psy)) {
1469     + dev_err(dev, "power_supply_register failed\n");
1470     + return -EINVAL;
1471     + }
1472     +
1473     ret = devm_request_irq(dev, chip->irq, max14656_irq,
1474     IRQF_TRIGGER_FALLING,
1475     MAX14656_NAME, chip);
1476     @@ -289,13 +296,6 @@ static int max14656_probe(struct i2c_client *client,
1477     }
1478     enable_irq_wake(chip->irq);
1479    
1480     - chip->detect_psy = devm_power_supply_register(dev,
1481     - &chip->psy_desc, &psy_cfg);
1482     - if (IS_ERR(chip->detect_psy)) {
1483     - dev_err(dev, "power_supply_register failed\n");
1484     - return -EINVAL;
1485     - }
1486     -
1487     schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
1488    
1489     return 0;
1490     diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
1491     index 1581f6ab1b1f..c45e5719ba17 100644
1492     --- a/drivers/pwm/core.c
1493     +++ b/drivers/pwm/core.c
1494     @@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
1495     if (IS_ENABLED(CONFIG_OF))
1496     of_pwmchip_add(chip);
1497    
1498     - pwmchip_sysfs_export(chip);
1499     -
1500     out:
1501     mutex_unlock(&pwm_lock);
1502     +
1503     + if (!ret)
1504     + pwmchip_sysfs_export(chip);
1505     +
1506     return ret;
1507     }
1508     EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
1509     @@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip)
1510     unsigned int i;
1511     int ret = 0;
1512    
1513     - pwmchip_sysfs_unexport_children(chip);
1514     + pwmchip_sysfs_unexport(chip);
1515    
1516     mutex_lock(&pwm_lock);
1517    
1518     @@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip)
1519    
1520     free_pwms(chip);
1521    
1522     - pwmchip_sysfs_unexport(chip);
1523     -
1524     out:
1525     mutex_unlock(&pwm_lock);
1526     return ret;
1527     diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
1528     index c1ed641b3e26..f6e738ad7bd9 100644
1529     --- a/drivers/pwm/pwm-meson.c
1530     +++ b/drivers/pwm/pwm-meson.c
1531     @@ -111,6 +111,10 @@ struct meson_pwm {
1532     const struct meson_pwm_data *data;
1533     void __iomem *base;
1534     u8 inverter_mask;
1535     + /*
1536     + * Protects register (write) access to the REG_MISC_AB register
1537     + * that is shared between the two PWMs.
1538     + */
1539     spinlock_t lock;
1540     };
1541    
1542     @@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1543     {
1544     u32 value, clk_shift, clk_enable, enable;
1545     unsigned int offset;
1546     + unsigned long flags;
1547    
1548     switch (id) {
1549     case 0:
1550     @@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1551     return;
1552     }
1553    
1554     + spin_lock_irqsave(&meson->lock, flags);
1555     +
1556     value = readl(meson->base + REG_MISC_AB);
1557     value &= ~(MISC_CLK_DIV_MASK << clk_shift);
1558     value |= channel->pre_div << clk_shift;
1559     @@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
1560     value = readl(meson->base + REG_MISC_AB);
1561     value |= enable;
1562     writel(value, meson->base + REG_MISC_AB);
1563     +
1564     + spin_unlock_irqrestore(&meson->lock, flags);
1565     }
1566    
1567     static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
1568     {
1569     u32 value, enable;
1570     + unsigned long flags;
1571    
1572     switch (id) {
1573     case 0:
1574     @@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
1575     return;
1576     }
1577    
1578     + spin_lock_irqsave(&meson->lock, flags);
1579     +
1580     value = readl(meson->base + REG_MISC_AB);
1581     value &= ~enable;
1582     writel(value, meson->base + REG_MISC_AB);
1583     +
1584     + spin_unlock_irqrestore(&meson->lock, flags);
1585     }
1586    
1587     static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1588     @@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1589     {
1590     struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
1591     struct meson_pwm *meson = to_meson_pwm(chip);
1592     - unsigned long flags;
1593     int err = 0;
1594    
1595     if (!state)
1596     return -EINVAL;
1597    
1598     - spin_lock_irqsave(&meson->lock, flags);
1599     -
1600     if (!state->enabled) {
1601     meson_pwm_disable(meson, pwm->hwpwm);
1602     channel->state.enabled = false;
1603    
1604     - goto unlock;
1605     + return 0;
1606     }
1607    
1608     if (state->period != channel->state.period ||
1609     @@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1610     err = meson_pwm_calc(meson, channel, pwm->hwpwm,
1611     state->duty_cycle, state->period);
1612     if (err < 0)
1613     - goto unlock;
1614     + return err;
1615    
1616     channel->state.polarity = state->polarity;
1617     channel->state.period = state->period;
1618     @@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
1619     channel->state.enabled = true;
1620     }
1621    
1622     -unlock:
1623     - spin_unlock_irqrestore(&meson->lock, flags);
1624     - return err;
1625     + return 0;
1626     }
1627    
1628     static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
1629     diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
1630     index f7b8a86fa5c5..ad4a40c0f27c 100644
1631     --- a/drivers/pwm/pwm-tiehrpwm.c
1632     +++ b/drivers/pwm/pwm-tiehrpwm.c
1633     @@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
1634     }
1635    
1636     /* Update shadow register first before modifying active register */
1637     + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
1638     + AQSFRC_RLDCSF_ZRO);
1639     ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
1640     /*
1641     * Changes to immediate action on Action Qualifier. This puts
1642     diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
1643     index 7c71cdb8a9d8..1c64fd8e9234 100644
1644     --- a/drivers/pwm/sysfs.c
1645     +++ b/drivers/pwm/sysfs.c
1646     @@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
1647     }
1648    
1649     void pwmchip_sysfs_unexport(struct pwm_chip *chip)
1650     -{
1651     - struct device *parent;
1652     -
1653     - parent = class_find_device(&pwm_class, NULL, chip,
1654     - pwmchip_sysfs_match);
1655     - if (parent) {
1656     - /* for class_find_device() */
1657     - put_device(parent);
1658     - device_unregister(parent);
1659     - }
1660     -}
1661     -
1662     -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
1663     {
1664     struct device *parent;
1665     unsigned int i;
1666     @@ -429,6 +416,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
1667     }
1668    
1669     put_device(parent);
1670     + device_unregister(parent);
1671     }
1672    
1673     static int __init pwm_sysfs_init(void)
1674     diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
1675     index bad0e0ea4f30..ef989a15aefc 100644
1676     --- a/drivers/rapidio/rio_cm.c
1677     +++ b/drivers/rapidio/rio_cm.c
1678     @@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev,
1679     mutex_init(&cm->rx_lock);
1680     riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
1681     cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
1682     + if (!cm->rx_wq) {
1683     + riocm_error("failed to allocate IBMBOX_%d on %s",
1684     + cmbox, mport->name);
1685     + rio_release_outb_mbox(mport, cmbox);
1686     + kfree(cm);
1687     + return -ENOMEM;
1688     + }
1689     +
1690     INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
1691    
1692     cm->tx_slot = 0;
1693     diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
1694     index de3f2a097451..1f1a05a90d3d 100644
1695     --- a/drivers/scsi/qla2xxx/qla_gs.c
1696     +++ b/drivers/scsi/qla2xxx/qla_gs.c
1697     @@ -3261,6 +3261,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
1698     "Async done-%s res %x, WWPN %8phC \n",
1699     sp->name, res, fcport->port_name);
1700    
1701     + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1702     +
1703     if (res == QLA_FUNCTION_TIMEOUT)
1704     return;
1705    
1706     @@ -4604,6 +4606,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
1707    
1708     done_free_sp:
1709     sp->free(sp);
1710     + fcport->flags &= ~FCF_ASYNC_SENT;
1711     done:
1712     return rval;
1713     }
1714     diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
1715     index 4e931fdf4d09..011a40b5fb49 100644
1716     --- a/drivers/soc/mediatek/mtk-pmic-wrap.c
1717     +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
1718     @@ -1104,7 +1104,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
1719     static int pwrap_init_cipher(struct pmic_wrapper *wrp)
1720     {
1721     int ret;
1722     - u32 rdata;
1723     + u32 rdata = 0;
1724    
1725     pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
1726     pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
1727     diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
1728     index d44d0e687ab8..2a43d6e99962 100644
1729     --- a/drivers/soc/renesas/renesas-soc.c
1730     +++ b/drivers/soc/renesas/renesas-soc.c
1731     @@ -285,6 +285,9 @@ static int __init renesas_soc_init(void)
1732     /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
1733     if ((product & 0x7fff) == 0x5210)
1734     product ^= 0x11;
1735     + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
1736     + if ((product & 0x7fff) == 0x5211)
1737     + product ^= 0x12;
1738     if (soc->id && ((product >> 8) & 0xff) != soc->id) {
1739     pr_warn("SoC mismatch (product = 0x%x)\n", product);
1740     return -ENODEV;
1741     diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
1742     index 96882ffde67e..3b81e1d75a97 100644
1743     --- a/drivers/soc/rockchip/grf.c
1744     +++ b/drivers/soc/rockchip/grf.c
1745     @@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
1746     };
1747    
1748     #define RK3288_GRF_SOC_CON0 0x244
1749     +#define RK3288_GRF_SOC_CON2 0x24c
1750    
1751     static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
1752     { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
1753     + { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
1754     };
1755    
1756     static const struct rockchip_grf_info rk3288_grf __initconst = {
1757     diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
1758     index 729be74621e3..f41333817c50 100644
1759     --- a/drivers/spi/spi-pxa2xx.c
1760     +++ b/drivers/spi/spi-pxa2xx.c
1761     @@ -1416,12 +1416,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1762    
1763     static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
1764     {
1765     - struct device *dev = param;
1766     -
1767     - if (dev != chan->device->dev->parent)
1768     - return false;
1769     -
1770     - return true;
1771     + return param == chan->device->dev;
1772     }
1773    
1774     static struct pxa2xx_spi_master *
1775     diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
1776     index a2c9bfae3d86..b139713289a4 100644
1777     --- a/drivers/thermal/qcom/tsens.c
1778     +++ b/drivers/thermal/qcom/tsens.c
1779     @@ -171,7 +171,8 @@ static int tsens_probe(struct platform_device *pdev)
1780     if (tmdev->ops->calibrate) {
1781     ret = tmdev->ops->calibrate(tmdev);
1782     if (ret < 0) {
1783     - dev_err(dev, "tsens calibration failed\n");
1784     + if (ret != -EPROBE_DEFER)
1785     + dev_err(dev, "tsens calibration failed\n");
1786     return ret;
1787     }
1788     }
1789     diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
1790     index 7aed5337bdd3..704c8ad045bb 100644
1791     --- a/drivers/thermal/rcar_gen3_thermal.c
1792     +++ b/drivers/thermal/rcar_gen3_thermal.c
1793     @@ -328,6 +328,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
1794     static int rcar_gen3_thermal_remove(struct platform_device *pdev)
1795     {
1796     struct device *dev = &pdev->dev;
1797     + struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
1798     +
1799     + rcar_thermal_irq_set(priv, false);
1800    
1801     pm_runtime_put(dev);
1802     pm_runtime_disable(dev);
1803     diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
1804     index d31b975dd3fd..284e8d052fc3 100644
1805     --- a/drivers/tty/serial/8250/8250_dw.c
1806     +++ b/drivers/tty/serial/8250/8250_dw.c
1807     @@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
1808    
1809     static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
1810     {
1811     - return param == chan->device->dev->parent;
1812     + return param == chan->device->dev;
1813     }
1814    
1815     /*
1816     @@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
1817     data->uart_16550_compatible = true;
1818     }
1819    
1820     - /* Platforms with iDMA */
1821     + /* Platforms with iDMA 64-bit */
1822     if (platform_get_resource_byname(to_platform_device(p->dev),
1823     IORESOURCE_MEM, "lpss_priv")) {
1824     data->dma.rx_param = p->dev->parent;
1825     diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
1826     index 82bed9810be6..62a0060d39d8 100644
1827     --- a/drivers/usb/typec/fusb302/fusb302.c
1828     +++ b/drivers/usb/typec/fusb302/fusb302.c
1829     @@ -641,6 +641,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
1830     return ret;
1831     chip->intr_togdone = false;
1832     } else {
1833     + /* Datasheet says vconn MUST be off when toggling */
1834     + WARN(chip->vconn_on, "Vconn is on during toggle start");
1835     /* unmask TOGDONE interrupt */
1836     ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA,
1837     FUSB_REG_MASKA_TOGDONE);
1838     diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
1839     index 64833879f75d..7a386fb30bf1 100644
1840     --- a/drivers/vfio/vfio.c
1841     +++ b/drivers/vfio/vfio.c
1842     @@ -34,6 +34,7 @@
1843     #include <linux/uaccess.h>
1844     #include <linux/vfio.h>
1845     #include <linux/wait.h>
1846     +#include <linux/sched/signal.h>
1847    
1848     #define DRIVER_VERSION "0.3"
1849     #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
1850     @@ -904,30 +905,17 @@ void *vfio_device_data(struct vfio_device *device)
1851     }
1852     EXPORT_SYMBOL_GPL(vfio_device_data);
1853    
1854     -/* Given a referenced group, check if it contains the device */
1855     -static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
1856     -{
1857     - struct vfio_device *device;
1858     -
1859     - device = vfio_group_get_device(group, dev);
1860     - if (!device)
1861     - return false;
1862     -
1863     - vfio_device_put(device);
1864     - return true;
1865     -}
1866     -
1867     /*
1868     * Decrement the device reference count and wait for the device to be
1869     * removed. Open file descriptors for the device... */
1870     void *vfio_del_group_dev(struct device *dev)
1871     {
1872     + DEFINE_WAIT_FUNC(wait, woken_wake_function);
1873     struct vfio_device *device = dev_get_drvdata(dev);
1874     struct vfio_group *group = device->group;
1875     void *device_data = device->device_data;
1876     struct vfio_unbound_dev *unbound;
1877     unsigned int i = 0;
1878     - long ret;
1879     bool interrupted = false;
1880    
1881     /*
1882     @@ -964,6 +952,8 @@ void *vfio_del_group_dev(struct device *dev)
1883     * interval with counter to allow the driver to take escalating
1884     * measures to release the device if it has the ability to do so.
1885     */
1886     + add_wait_queue(&vfio.release_q, &wait);
1887     +
1888     do {
1889     device = vfio_group_get_device(group, dev);
1890     if (!device)
1891     @@ -975,12 +965,10 @@ void *vfio_del_group_dev(struct device *dev)
1892     vfio_device_put(device);
1893    
1894     if (interrupted) {
1895     - ret = wait_event_timeout(vfio.release_q,
1896     - !vfio_dev_present(group, dev), HZ * 10);
1897     + wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
1898     } else {
1899     - ret = wait_event_interruptible_timeout(vfio.release_q,
1900     - !vfio_dev_present(group, dev), HZ * 10);
1901     - if (ret == -ERESTARTSYS) {
1902     + wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
1903     + if (signal_pending(current)) {
1904     interrupted = true;
1905     dev_warn(dev,
1906     "Device is currently in use, task"
1907     @@ -989,8 +977,10 @@ void *vfio_del_group_dev(struct device *dev)
1908     current->comm, task_pid_nr(current));
1909     }
1910     }
1911     - } while (ret <= 0);
1912    
1913     + } while (1);
1914     +
1915     + remove_wait_queue(&vfio.release_q, &wait);
1916     /*
1917     * In order to support multiple devices per group, devices can be
1918     * plucked from the group while other devices in the group are still
1919     diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
1920     index 463028543173..59e1cae57948 100644
1921     --- a/drivers/video/fbdev/hgafb.c
1922     +++ b/drivers/video/fbdev/hgafb.c
1923     @@ -285,6 +285,8 @@ static int hga_card_detect(void)
1924     hga_vram_len = 0x08000;
1925    
1926     hga_vram = ioremap(0xb0000, hga_vram_len);
1927     + if (!hga_vram)
1928     + goto error;
1929    
1930     if (request_region(0x3b0, 12, "hgafb"))
1931     release_io_ports = 1;
1932     diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
1933     index ecdcf358ad5e..ffcf553719a3 100644
1934     --- a/drivers/video/fbdev/imsttfb.c
1935     +++ b/drivers/video/fbdev/imsttfb.c
1936     @@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1937     info->fix.smem_start = addr;
1938     info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
1939     0x400000 : 0x800000);
1940     + if (!info->screen_base) {
1941     + release_mem_region(addr, size);
1942     + framebuffer_release(info);
1943     + return -ENOMEM;
1944     + }
1945     info->fix.mmio_start = addr + 0x800000;
1946     par->dc_regs = ioremap(addr + 0x800000, 0x1000);
1947     par->cmap_regs_phys = addr + 0x840000;
1948     diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
1949     index 5ea8909a41f9..b165c46aca74 100644
1950     --- a/drivers/watchdog/Kconfig
1951     +++ b/drivers/watchdog/Kconfig
1952     @@ -1967,6 +1967,7 @@ comment "Watchdog Pretimeout Governors"
1953    
1954     config WATCHDOG_PRETIMEOUT_GOV
1955     bool "Enable watchdog pretimeout governors"
1956     + depends on WATCHDOG_CORE
1957     help
1958     The option allows to select watchdog pretimeout governors.
1959    
1960     diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
1961     index 2b52514eaa86..7e7bdcbbc741 100644
1962     --- a/drivers/watchdog/imx2_wdt.c
1963     +++ b/drivers/watchdog/imx2_wdt.c
1964     @@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
1965     static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
1966     unsigned int new_timeout)
1967     {
1968     - __imx2_wdt_set_timeout(wdog, new_timeout);
1969     + unsigned int actual;
1970    
1971     + actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
1972     + __imx2_wdt_set_timeout(wdog, actual);
1973     wdog->timeout = new_timeout;
1974     return 0;
1975     }
1976     diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
1977     index 39843fa7e11b..920d350df37b 100644
1978     --- a/fs/configfs/dir.c
1979     +++ b/fs/configfs/dir.c
1980     @@ -1755,12 +1755,19 @@ int configfs_register_group(struct config_group *parent_group,
1981    
1982     inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
1983     ret = create_default_group(parent_group, group);
1984     - if (!ret) {
1985     - spin_lock(&configfs_dirent_lock);
1986     - configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1987     - spin_unlock(&configfs_dirent_lock);
1988     - }
1989     + if (ret)
1990     + goto err_out;
1991     +
1992     + spin_lock(&configfs_dirent_lock);
1993     + configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1994     + spin_unlock(&configfs_dirent_lock);
1995     + inode_unlock(d_inode(parent));
1996     + return 0;
1997     +err_out:
1998     inode_unlock(d_inode(parent));
1999     + mutex_lock(&subsys->su_mutex);
2000     + unlink_group(group);
2001     + mutex_unlock(&subsys->su_mutex);
2002     return ret;
2003     }
2004     EXPORT_SYMBOL(configfs_register_group);
2005     diff --git a/fs/dax.c b/fs/dax.c
2006     index 004c8ac1117c..75a289c31c7e 100644
2007     --- a/fs/dax.c
2008     +++ b/fs/dax.c
2009     @@ -908,7 +908,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
2010     goto unlock_pmd;
2011    
2012     flush_cache_page(vma, address, pfn);
2013     - pmd = pmdp_huge_clear_flush(vma, address, pmdp);
2014     + pmd = pmdp_invalidate(vma, address, pmdp);
2015     pmd = pmd_wrprotect(pmd);
2016     pmd = pmd_mkclean(pmd);
2017     set_pmd_at(vma->vm_mm, address, pmdp, pmd);
2018     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2019     index a4b6eacf22ea..44ea7ac69ef4 100644
2020     --- a/fs/f2fs/f2fs.h
2021     +++ b/fs/f2fs/f2fs.h
2022     @@ -1744,6 +1744,7 @@ enospc:
2023     return -ENOSPC;
2024     }
2025    
2026     +void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
2027     static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2028     struct inode *inode,
2029     block_t count)
2030     @@ -1752,13 +1753,21 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2031    
2032     spin_lock(&sbi->stat_lock);
2033     f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2034     - f2fs_bug_on(sbi, inode->i_blocks < sectors);
2035     sbi->total_valid_block_count -= (block_t)count;
2036     if (sbi->reserved_blocks &&
2037     sbi->current_reserved_blocks < sbi->reserved_blocks)
2038     sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2039     sbi->current_reserved_blocks + count);
2040     spin_unlock(&sbi->stat_lock);
2041     + if (unlikely(inode->i_blocks < sectors)) {
2042     + f2fs_msg(sbi->sb, KERN_WARNING,
2043     + "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2044     + inode->i_ino,
2045     + (unsigned long long)inode->i_blocks,
2046     + (unsigned long long)sectors);
2047     + set_sbi_flag(sbi, SBI_NEED_FSCK);
2048     + return;
2049     + }
2050     f2fs_i_blocks_write(inode, count, false, true);
2051     }
2052    
2053     @@ -2488,7 +2497,9 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2054    
2055     static inline int inline_xattr_size(struct inode *inode)
2056     {
2057     - return get_inline_xattr_addrs(inode) * sizeof(__le32);
2058     + if (f2fs_has_inline_xattr(inode))
2059     + return get_inline_xattr_addrs(inode) * sizeof(__le32);
2060     + return 0;
2061     }
2062    
2063     static inline int f2fs_has_inline_data(struct inode *inode)
2064     @@ -2727,7 +2738,6 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
2065    
2066     bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
2067     block_t blkaddr, int type);
2068     -void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
2069     static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
2070     block_t blkaddr, int type)
2071     {
2072     diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
2073     index dd608b819a3c..0f31df01e36c 100644
2074     --- a/fs/f2fs/inode.c
2075     +++ b/fs/f2fs/inode.c
2076     @@ -179,8 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
2077    
2078     if (provided != calculated)
2079     f2fs_msg(sbi->sb, KERN_WARNING,
2080     - "checksum invalid, ino = %x, %x vs. %x",
2081     - ino_of_node(page), provided, calculated);
2082     + "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
2083     + page->index, ino_of_node(page), provided, calculated);
2084    
2085     return provided == calculated;
2086     }
2087     @@ -476,6 +476,7 @@ make_now:
2088     return inode;
2089    
2090     bad_inode:
2091     + f2fs_inode_synced(inode);
2092     iget_failed(inode);
2093     trace_f2fs_iget_exit(inode, ret);
2094     return ERR_PTR(ret);
2095     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2096     index 19a0d83aae65..e2d9edad758c 100644
2097     --- a/fs/f2fs/node.c
2098     +++ b/fs/f2fs/node.c
2099     @@ -1180,8 +1180,14 @@ int f2fs_remove_inode_page(struct inode *inode)
2100     f2fs_put_dnode(&dn);
2101     return -EIO;
2102     }
2103     - f2fs_bug_on(F2FS_I_SB(inode),
2104     - inode->i_blocks != 0 && inode->i_blocks != 8);
2105     +
2106     + if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
2107     + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
2108     + "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
2109     + inode->i_ino,
2110     + (unsigned long long)inode->i_blocks);
2111     + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
2112     + }
2113    
2114     /* will put inode & node pages */
2115     err = truncate_node(&dn);
2116     @@ -1276,9 +1282,10 @@ static int read_node_page(struct page *page, int op_flags)
2117     int err;
2118    
2119     if (PageUptodate(page)) {
2120     -#ifdef CONFIG_F2FS_CHECK_FS
2121     - f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
2122     -#endif
2123     + if (!f2fs_inode_chksum_verify(sbi, page)) {
2124     + ClearPageUptodate(page);
2125     + return -EBADMSG;
2126     + }
2127     return LOCKED_PAGE;
2128     }
2129    
2130     @@ -2073,6 +2080,9 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
2131     if (unlikely(nid == 0))
2132     return false;
2133    
2134     + if (unlikely(f2fs_check_nid_range(sbi, nid)))
2135     + return false;
2136     +
2137     i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2138     i->nid = nid;
2139     i->state = FREE_NID;
2140     diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
2141     index ae0e5f2e67b4..bf5c5f4fa77e 100644
2142     --- a/fs/f2fs/recovery.c
2143     +++ b/fs/f2fs/recovery.c
2144     @@ -485,7 +485,15 @@ retry_dn:
2145     goto err;
2146    
2147     f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
2148     - f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
2149     +
2150     + if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
2151     + f2fs_msg(sbi->sb, KERN_WARNING,
2152     + "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
2153     + inode->i_ino, ofs_of_node(dn.node_page),
2154     + ofs_of_node(page));
2155     + err = -EFAULT;
2156     + goto err;
2157     + }
2158    
2159     for (; start < end; start++, dn.ofs_in_node++) {
2160     block_t src, dest;
2161     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
2162     index 03fa2c4d3d79..8fc3edb6760c 100644
2163     --- a/fs/f2fs/segment.c
2164     +++ b/fs/f2fs/segment.c
2165     @@ -3069,13 +3069,18 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
2166     {
2167     int err;
2168     struct f2fs_sb_info *sbi = fio->sbi;
2169     + unsigned int segno;
2170    
2171     fio->new_blkaddr = fio->old_blkaddr;
2172     /* i/o temperature is needed for passing down write hints */
2173     __get_segment_type(fio);
2174    
2175     - f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
2176     - GET_SEGNO(sbi, fio->new_blkaddr))->type));
2177     + segno = GET_SEGNO(sbi, fio->new_blkaddr);
2178     +
2179     + if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
2180     + set_sbi_flag(sbi, SBI_NEED_FSCK);
2181     + return -EFAULT;
2182     + }
2183    
2184     stat_inc_inplace_blocks(fio->sbi);
2185    
2186     diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
2187     index b3d9e317ff0c..5079532cb176 100644
2188     --- a/fs/f2fs/segment.h
2189     +++ b/fs/f2fs/segment.h
2190     @@ -660,7 +660,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
2191     static inline int check_block_count(struct f2fs_sb_info *sbi,
2192     int segno, struct f2fs_sit_entry *raw_sit)
2193     {
2194     -#ifdef CONFIG_F2FS_CHECK_FS
2195     bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
2196     int valid_blocks = 0;
2197     int cur_pos = 0, next_pos;
2198     @@ -687,7 +686,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
2199     set_sbi_flag(sbi, SBI_NEED_FSCK);
2200     return -EINVAL;
2201     }
2202     -#endif
2203     +
2204     /* check segment usage, and check boundary of a given segment number */
2205     if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
2206     || segno > TOTAL_SEGS(sbi) - 1)) {
2207     diff --git a/fs/fat/file.c b/fs/fat/file.c
2208     index 4f3d72fb1e60..f86ea08bd6ce 100644
2209     --- a/fs/fat/file.c
2210     +++ b/fs/fat/file.c
2211     @@ -193,12 +193,17 @@ static int fat_file_release(struct inode *inode, struct file *filp)
2212     int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
2213     {
2214     struct inode *inode = filp->f_mapping->host;
2215     - int res, err;
2216     + int err;
2217     +
2218     + err = __generic_file_fsync(filp, start, end, datasync);
2219     + if (err)
2220     + return err;
2221    
2222     - res = generic_file_fsync(filp, start, end, datasync);
2223     err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
2224     + if (err)
2225     + return err;
2226    
2227     - return res ? res : err;
2228     + return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
2229     }
2230    
2231    
2232     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2233     index 249de20f752a..6ee471b72a34 100644
2234     --- a/fs/fuse/dev.c
2235     +++ b/fs/fuse/dev.c
2236     @@ -1681,7 +1681,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
2237     offset = outarg->offset & ~PAGE_MASK;
2238     file_size = i_size_read(inode);
2239    
2240     - num = outarg->size;
2241     + num = min(outarg->size, fc->max_write);
2242     if (outarg->offset > file_size)
2243     num = 0;
2244     else if (outarg->offset + num > file_size)
2245     diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2246     index 418fa9c78186..db0beefe65ec 100644
2247     --- a/fs/nfsd/nfs4xdr.c
2248     +++ b/fs/nfsd/nfs4xdr.c
2249     @@ -2413,8 +2413,10 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2250     __be32 status;
2251     int err;
2252     struct nfs4_acl *acl = NULL;
2253     +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2254     void *context = NULL;
2255     int contextlen;
2256     +#endif
2257     bool contextsupport = false;
2258     struct nfsd4_compoundres *resp = rqstp->rq_resp;
2259     u32 minorversion = resp->cstate.minorversion;
2260     @@ -2899,12 +2901,14 @@ out_acl:
2261     *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA);
2262     }
2263    
2264     +#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2265     if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
2266     status = nfsd4_encode_security_label(xdr, rqstp, context,
2267     contextlen);
2268     if (status)
2269     goto out;
2270     }
2271     +#endif
2272    
2273     attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
2274     write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
2275     diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
2276     index a7e107309f76..db351247892d 100644
2277     --- a/fs/nfsd/vfs.h
2278     +++ b/fs/nfsd/vfs.h
2279     @@ -120,8 +120,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra);
2280    
2281     static inline int fh_want_write(struct svc_fh *fh)
2282     {
2283     - int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
2284     + int ret;
2285    
2286     + if (fh->fh_want_write)
2287     + return 0;
2288     + ret = mnt_want_write(fh->fh_export->ex_path.mnt);
2289     if (!ret)
2290     fh->fh_want_write = true;
2291     return ret;
2292     diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
2293     index 0c810f20f778..00338b828f76 100644
2294     --- a/fs/overlayfs/file.c
2295     +++ b/fs/overlayfs/file.c
2296     @@ -11,6 +11,7 @@
2297     #include <linux/mount.h>
2298     #include <linux/xattr.h>
2299     #include <linux/uio.h>
2300     +#include <linux/uaccess.h>
2301     #include "overlayfs.h"
2302    
2303     static char ovl_whatisit(struct inode *inode, struct inode *realinode)
2304     @@ -29,10 +30,11 @@ static struct file *ovl_open_realfile(const struct file *file,
2305     struct inode *inode = file_inode(file);
2306     struct file *realfile;
2307     const struct cred *old_cred;
2308     + int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
2309    
2310     old_cred = ovl_override_creds(inode->i_sb);
2311     - realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
2312     - realinode, current_cred());
2313     + realfile = open_with_fake_path(&file->f_path, flags, realinode,
2314     + current_cred());
2315     revert_creds(old_cred);
2316    
2317     pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
2318     @@ -50,7 +52,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
2319     int err;
2320    
2321     /* No atime modificaton on underlying */
2322     - flags |= O_NOATIME;
2323     + flags |= O_NOATIME | FMODE_NONOTIFY;
2324    
2325     /* If some flag changed that cannot be changed then something's amiss */
2326     if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
2327     @@ -144,11 +146,47 @@ static int ovl_release(struct inode *inode, struct file *file)
2328    
2329     static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
2330     {
2331     - struct inode *realinode = ovl_inode_real(file_inode(file));
2332     + struct inode *inode = file_inode(file);
2333     + struct fd real;
2334     + const struct cred *old_cred;
2335     + ssize_t ret;
2336     +
2337     + /*
2338     + * The two special cases below do not need to involve real fs,
2339     + * so we can optimizing concurrent callers.
2340     + */
2341     + if (offset == 0) {
2342     + if (whence == SEEK_CUR)
2343     + return file->f_pos;
2344     +
2345     + if (whence == SEEK_SET)
2346     + return vfs_setpos(file, 0, 0);
2347     + }
2348     +
2349     + ret = ovl_real_fdget(file, &real);
2350     + if (ret)
2351     + return ret;
2352     +
2353     + /*
2354     + * Overlay file f_pos is the master copy that is preserved
2355     + * through copy up and modified on read/write, but only real
2356     + * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose
2357     + * limitations that are more strict than ->s_maxbytes for specific
2358     + * files, so we use the real file to perform seeks.
2359     + */
2360     + inode_lock(inode);
2361     + real.file->f_pos = file->f_pos;
2362     +
2363     + old_cred = ovl_override_creds(inode->i_sb);
2364     + ret = vfs_llseek(real.file, offset, whence);
2365     + revert_creds(old_cred);
2366     +
2367     + file->f_pos = real.file->f_pos;
2368     + inode_unlock(inode);
2369     +
2370     + fdput(real);
2371    
2372     - return generic_file_llseek_size(file, offset, whence,
2373     - realinode->i_sb->s_maxbytes,
2374     - i_size_read(realinode));
2375     + return ret;
2376     }
2377    
2378     static void ovl_file_accessed(struct file *file)
2379     @@ -371,10 +409,68 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
2380     return ret;
2381     }
2382    
2383     -static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2384     +static unsigned int ovl_get_inode_flags(struct inode *inode)
2385     +{
2386     + unsigned int flags = READ_ONCE(inode->i_flags);
2387     + unsigned int ovl_iflags = 0;
2388     +
2389     + if (flags & S_SYNC)
2390     + ovl_iflags |= FS_SYNC_FL;
2391     + if (flags & S_APPEND)
2392     + ovl_iflags |= FS_APPEND_FL;
2393     + if (flags & S_IMMUTABLE)
2394     + ovl_iflags |= FS_IMMUTABLE_FL;
2395     + if (flags & S_NOATIME)
2396     + ovl_iflags |= FS_NOATIME_FL;
2397     +
2398     + return ovl_iflags;
2399     +}
2400     +
2401     +static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
2402     {
2403     long ret;
2404     struct inode *inode = file_inode(file);
2405     + unsigned int flags;
2406     + unsigned int old_flags;
2407     +
2408     + if (!inode_owner_or_capable(inode))
2409     + return -EACCES;
2410     +
2411     + if (get_user(flags, (int __user *) arg))
2412     + return -EFAULT;
2413     +
2414     + ret = mnt_want_write_file(file);
2415     + if (ret)
2416     + return ret;
2417     +
2418     + inode_lock(inode);
2419     +
2420     + /* Check the capability before cred override */
2421     + ret = -EPERM;
2422     + old_flags = ovl_get_inode_flags(inode);
2423     + if (((flags ^ old_flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
2424     + !capable(CAP_LINUX_IMMUTABLE))
2425     + goto unlock;
2426     +
2427     + ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
2428     + if (ret)
2429     + goto unlock;
2430     +
2431     + ret = ovl_real_ioctl(file, FS_IOC_SETFLAGS, arg);
2432     +
2433     + ovl_copyflags(ovl_inode_real(inode), inode);
2434     +unlock:
2435     + inode_unlock(inode);
2436     +
2437     + mnt_drop_write_file(file);
2438     +
2439     + return ret;
2440     +
2441     +}
2442     +
2443     +static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2444     +{
2445     + long ret;
2446    
2447     switch (cmd) {
2448     case FS_IOC_GETFLAGS:
2449     @@ -382,23 +478,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2450     break;
2451    
2452     case FS_IOC_SETFLAGS:
2453     - if (!inode_owner_or_capable(inode))
2454     - return -EACCES;
2455     -
2456     - ret = mnt_want_write_file(file);
2457     - if (ret)
2458     - return ret;
2459     -
2460     - ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
2461     - if (!ret) {
2462     - ret = ovl_real_ioctl(file, cmd, arg);
2463     -
2464     - inode_lock(inode);
2465     - ovl_copyflags(ovl_inode_real(inode), inode);
2466     - inode_unlock(inode);
2467     - }
2468     -
2469     - mnt_drop_write_file(file);
2470     + ret = ovl_ioctl_set_flags(file, arg);
2471     break;
2472    
2473     default:
2474     diff --git a/include/linux/pwm.h b/include/linux/pwm.h
2475     index 56518adc31dd..bd7d611d63e9 100644
2476     --- a/include/linux/pwm.h
2477     +++ b/include/linux/pwm.h
2478     @@ -639,7 +639,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
2479     #ifdef CONFIG_PWM_SYSFS
2480     void pwmchip_sysfs_export(struct pwm_chip *chip);
2481     void pwmchip_sysfs_unexport(struct pwm_chip *chip);
2482     -void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
2483     #else
2484     static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
2485     {
2486     @@ -648,10 +647,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
2487     static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
2488     {
2489     }
2490     -
2491     -static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
2492     -{
2493     -}
2494     #endif /* CONFIG_PWM_SYSFS */
2495    
2496     #endif /* __LINUX_PWM_H */
2497     diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
2498     index cc2d0c3b475b..1dfb75057580 100644
2499     --- a/include/net/bluetooth/hci_core.h
2500     +++ b/include/net/bluetooth/hci_core.h
2501     @@ -182,9 +182,6 @@ struct adv_info {
2502    
2503     #define HCI_MAX_SHORT_NAME_LENGTH 10
2504    
2505     -/* Min encryption key size to match with SMP */
2506     -#define HCI_MIN_ENC_KEY_SIZE 7
2507     -
2508     /* Default LE RPA expiry time, 15 minutes */
2509     #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
2510    
2511     diff --git a/init/initramfs.c b/init/initramfs.c
2512     index f6f4a1e4cd54..cd5fb00fcb54 100644
2513     --- a/init/initramfs.c
2514     +++ b/init/initramfs.c
2515     @@ -612,13 +612,12 @@ static int __init populate_rootfs(void)
2516     printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
2517     err = unpack_to_rootfs((char *)initrd_start,
2518     initrd_end - initrd_start);
2519     - if (!err) {
2520     - free_initrd();
2521     + if (!err)
2522     goto done;
2523     - } else {
2524     - clean_rootfs();
2525     - unpack_to_rootfs(__initramfs_start, __initramfs_size);
2526     - }
2527     +
2528     + clean_rootfs();
2529     + unpack_to_rootfs(__initramfs_start, __initramfs_size);
2530     +
2531     printk(KERN_INFO "rootfs image is not initramfs (%s)"
2532     "; looks like an initrd\n", err);
2533     fd = ksys_open("/initrd.image",
2534     @@ -632,7 +631,6 @@ static int __init populate_rootfs(void)
2535     written, initrd_end - initrd_start);
2536    
2537     ksys_close(fd);
2538     - free_initrd();
2539     }
2540     done:
2541     /* empty statement */;
2542     @@ -642,9 +640,9 @@ static int __init populate_rootfs(void)
2543     initrd_end - initrd_start);
2544     if (err)
2545     printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
2546     - free_initrd();
2547     #endif
2548     }
2549     + free_initrd();
2550     flush_delayed_fput();
2551     /*
2552     * Try loading default modules from initramfs. This gives
2553     diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2554     index c0d58f390c3b..bce7af1546d9 100644
2555     --- a/ipc/mqueue.c
2556     +++ b/ipc/mqueue.c
2557     @@ -391,7 +391,8 @@ static void mqueue_evict_inode(struct inode *inode)
2558     struct user_struct *user;
2559     unsigned long mq_bytes, mq_treesize;
2560     struct ipc_namespace *ipc_ns;
2561     - struct msg_msg *msg;
2562     + struct msg_msg *msg, *nmsg;
2563     + LIST_HEAD(tmp_msg);
2564    
2565     clear_inode(inode);
2566    
2567     @@ -402,10 +403,15 @@ static void mqueue_evict_inode(struct inode *inode)
2568     info = MQUEUE_I(inode);
2569     spin_lock(&info->lock);
2570     while ((msg = msg_get(info)) != NULL)
2571     - free_msg(msg);
2572     + list_add_tail(&msg->m_list, &tmp_msg);
2573     kfree(info->node_cache);
2574     spin_unlock(&info->lock);
2575    
2576     + list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
2577     + list_del(&msg->m_list);
2578     + free_msg(msg);
2579     + }
2580     +
2581     /* Total amount of bytes accounted for the mqueue */
2582     mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
2583     min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
2584     diff --git a/ipc/msgutil.c b/ipc/msgutil.c
2585     index 84598025a6ad..e65593742e2b 100644
2586     --- a/ipc/msgutil.c
2587     +++ b/ipc/msgutil.c
2588     @@ -18,6 +18,7 @@
2589     #include <linux/utsname.h>
2590     #include <linux/proc_ns.h>
2591     #include <linux/uaccess.h>
2592     +#include <linux/sched.h>
2593    
2594     #include "util.h"
2595    
2596     @@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len)
2597     pseg = &msg->next;
2598     while (len > 0) {
2599     struct msg_msgseg *seg;
2600     +
2601     + cond_resched();
2602     +
2603     alen = min(len, DATALEN_SEG);
2604     seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
2605     if (seg == NULL)
2606     @@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg)
2607     kfree(msg);
2608     while (seg != NULL) {
2609     struct msg_msgseg *tmp = seg->next;
2610     +
2611     + cond_resched();
2612     kfree(seg);
2613     seg = tmp;
2614     }
2615     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2616     index acc2305ad895..d3580a68dbef 100644
2617     --- a/kernel/bpf/verifier.c
2618     +++ b/kernel/bpf/verifier.c
2619     @@ -5743,7 +5743,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
2620     insn->dst_reg,
2621     shift);
2622     insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
2623     - (1 << size * 8) - 1);
2624     + (1ULL << size * 8) - 1);
2625     }
2626     }
2627    
2628     diff --git a/kernel/sys.c b/kernel/sys.c
2629     index 123bd73046ec..096932a45046 100644
2630     --- a/kernel/sys.c
2631     +++ b/kernel/sys.c
2632     @@ -1919,7 +1919,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
2633     ((unsigned long)prctl_map->__m1 __op \
2634     (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
2635     error = __prctl_check_order(start_code, <, end_code);
2636     - error |= __prctl_check_order(start_data, <, end_data);
2637     + error |= __prctl_check_order(start_data,<=, end_data);
2638     error |= __prctl_check_order(start_brk, <=, brk);
2639     error |= __prctl_check_order(arg_start, <=, arg_end);
2640     error |= __prctl_check_order(env_start, <=, env_end);
2641     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2642     index 9a85c7ae7362..f8576509c7be 100644
2643     --- a/kernel/sysctl.c
2644     +++ b/kernel/sysctl.c
2645     @@ -2791,8 +2791,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2646     if (neg)
2647     continue;
2648     val = convmul * val / convdiv;
2649     - if ((min && val < *min) || (max && val > *max))
2650     - continue;
2651     + if ((min && val < *min) || (max && val > *max)) {
2652     + err = -EINVAL;
2653     + break;
2654     + }
2655     *i = val;
2656     } else {
2657     val = convdiv * (*i) / convmul;
2658     diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
2659     index c5e0cba3b39c..6b23cd584295 100644
2660     --- a/kernel/time/ntp.c
2661     +++ b/kernel/time/ntp.c
2662     @@ -698,7 +698,7 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
2663     time_constant = max(time_constant, 0l);
2664     }
2665    
2666     - if (txc->modes & ADJ_TAI && txc->constant > 0)
2667     + if (txc->modes & ADJ_TAI && txc->constant >= 0)
2668     *time_tai = txc->constant;
2669    
2670     if (txc->modes & ADJ_OFFSET)
2671     diff --git a/mm/Kconfig b/mm/Kconfig
2672     index de64ea658716..b457e94ae618 100644
2673     --- a/mm/Kconfig
2674     +++ b/mm/Kconfig
2675     @@ -700,12 +700,12 @@ config DEV_PAGEMAP_OPS
2676    
2677     config HMM
2678     bool
2679     + select MMU_NOTIFIER
2680     select MIGRATE_VMA_HELPER
2681    
2682     config HMM_MIRROR
2683     bool "HMM mirror CPU page table into a device page table"
2684     depends on ARCH_HAS_HMM
2685     - select MMU_NOTIFIER
2686     select HMM
2687     help
2688     Select HMM_MIRROR if you want to mirror range of the CPU page table of a
2689     diff --git a/mm/cma.c b/mm/cma.c
2690     index bfe9f5397165..476dfe13a701 100644
2691     --- a/mm/cma.c
2692     +++ b/mm/cma.c
2693     @@ -106,8 +106,10 @@ static int __init cma_activate_area(struct cma *cma)
2694    
2695     cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
2696    
2697     - if (!cma->bitmap)
2698     + if (!cma->bitmap) {
2699     + cma->count = 0;
2700     return -ENOMEM;
2701     + }
2702    
2703     WARN_ON_ONCE(!pfn_valid(pfn));
2704     zone = page_zone(pfn_to_page(pfn));
2705     @@ -369,23 +371,26 @@ err:
2706     #ifdef CONFIG_CMA_DEBUG
2707     static void cma_debug_show_areas(struct cma *cma)
2708     {
2709     - unsigned long next_zero_bit, next_set_bit;
2710     + unsigned long next_zero_bit, next_set_bit, nr_zero;
2711     unsigned long start = 0;
2712     - unsigned int nr_zero, nr_total = 0;
2713     + unsigned long nr_part, nr_total = 0;
2714     + unsigned long nbits = cma_bitmap_maxno(cma);
2715    
2716     mutex_lock(&cma->lock);
2717     pr_info("number of available pages: ");
2718     for (;;) {
2719     - next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
2720     - if (next_zero_bit >= cma->count)
2721     + next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
2722     + if (next_zero_bit >= nbits)
2723     break;
2724     - next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
2725     + next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
2726     nr_zero = next_set_bit - next_zero_bit;
2727     - pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
2728     - nr_total += nr_zero;
2729     + nr_part = nr_zero << cma->order_per_bit;
2730     + pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
2731     + next_zero_bit);
2732     + nr_total += nr_part;
2733     start = next_zero_bit + nr_zero;
2734     }
2735     - pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
2736     + pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
2737     mutex_unlock(&cma->lock);
2738     }
2739     #else
2740     diff --git a/mm/cma_debug.c b/mm/cma_debug.c
2741     index ad6723e9d110..3e0415076cc9 100644
2742     --- a/mm/cma_debug.c
2743     +++ b/mm/cma_debug.c
2744     @@ -58,7 +58,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
2745     mutex_lock(&cma->lock);
2746     for (;;) {
2747     start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
2748     - if (start >= cma->count)
2749     + if (start >= bitmap_maxno)
2750     break;
2751     end = find_next_bit(cma->bitmap, bitmap_maxno, start);
2752     maxchunk = max(end - start, maxchunk);
2753     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2754     index 0bbb033d7d8c..65179513c2b2 100644
2755     --- a/mm/hugetlb.c
2756     +++ b/mm/hugetlb.c
2757     @@ -1256,12 +1256,23 @@ void free_huge_page(struct page *page)
2758     ClearPagePrivate(page);
2759    
2760     /*
2761     - * A return code of zero implies that the subpool will be under its
2762     - * minimum size if the reservation is not restored after page is free.
2763     - * Therefore, force restore_reserve operation.
2764     + * If PagePrivate() was set on page, page allocation consumed a
2765     + * reservation. If the page was associated with a subpool, there
2766     + * would have been a page reserved in the subpool before allocation
2767     + * via hugepage_subpool_get_pages(). Since we are 'restoring' the
2768     + * reservtion, do not call hugepage_subpool_put_pages() as this will
2769     + * remove the reserved page from the subpool.
2770     */
2771     - if (hugepage_subpool_put_pages(spool, 1) == 0)
2772     - restore_reserve = true;
2773     + if (!restore_reserve) {
2774     + /*
2775     + * A return code of zero implies that the subpool will be
2776     + * under its minimum size if the reservation is not restored
2777     + * after page is free. Therefore, force restore_reserve
2778     + * operation.
2779     + */
2780     + if (hugepage_subpool_put_pages(spool, 1) == 0)
2781     + restore_reserve = true;
2782     + }
2783    
2784     spin_lock(&hugetlb_lock);
2785     clear_page_huge_active(page);
2786     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2787     index 8e6932a140b8..2d04bd2e1ced 100644
2788     --- a/mm/page_alloc.c
2789     +++ b/mm/page_alloc.c
2790     @@ -5937,13 +5937,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
2791     unsigned long *zone_end_pfn,
2792     unsigned long *ignored)
2793     {
2794     + unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
2795     + unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
2796     /* When hotadd a new node from cpu_up(), the node should be empty */
2797     if (!node_start_pfn && !node_end_pfn)
2798     return 0;
2799    
2800     /* Get the start and end of the zone */
2801     - *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2802     - *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2803     + *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
2804     + *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
2805     adjust_zone_range_for_zone_movable(nid, zone_type,
2806     node_start_pfn, node_end_pfn,
2807     zone_start_pfn, zone_end_pfn);
2808     diff --git a/mm/percpu.c b/mm/percpu.c
2809     index 41e58f3d8fbf..ff76fa0b7528 100644
2810     --- a/mm/percpu.c
2811     +++ b/mm/percpu.c
2812     @@ -988,7 +988,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
2813     /*
2814     * Search to find a fit.
2815     */
2816     - end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
2817     + end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
2818     + pcpu_chunk_map_bits(chunk));
2819     bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
2820     alloc_bits, align_mask);
2821     if (bit_off >= end)
2822     @@ -1721,6 +1722,7 @@ void free_percpu(void __percpu *ptr)
2823     struct pcpu_chunk *chunk;
2824     unsigned long flags;
2825     int off;
2826     + bool need_balance = false;
2827    
2828     if (!ptr)
2829     return;
2830     @@ -1742,7 +1744,7 @@ void free_percpu(void __percpu *ptr)
2831    
2832     list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
2833     if (pos != chunk) {
2834     - pcpu_schedule_balance_work();
2835     + need_balance = true;
2836     break;
2837     }
2838     }
2839     @@ -1750,6 +1752,9 @@ void free_percpu(void __percpu *ptr)
2840     trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2841    
2842     spin_unlock_irqrestore(&pcpu_lock, flags);
2843     +
2844     + if (need_balance)
2845     + pcpu_schedule_balance_work();
2846     }
2847     EXPORT_SYMBOL_GPL(free_percpu);
2848    
2849     diff --git a/mm/rmap.c b/mm/rmap.c
2850     index 85b7f9423352..f048c2651954 100644
2851     --- a/mm/rmap.c
2852     +++ b/mm/rmap.c
2853     @@ -926,7 +926,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
2854     continue;
2855    
2856     flush_cache_page(vma, address, page_to_pfn(page));
2857     - entry = pmdp_huge_clear_flush(vma, address, pmd);
2858     + entry = pmdp_invalidate(vma, address, pmd);
2859     entry = pmd_wrprotect(entry);
2860     entry = pmd_mkclean(entry);
2861     set_pmd_at(vma->vm_mm, address, pmd, entry);
2862     diff --git a/mm/slab.c b/mm/slab.c
2863     index 018d32496e8d..46f21e73db2f 100644
2864     --- a/mm/slab.c
2865     +++ b/mm/slab.c
2866     @@ -4326,8 +4326,12 @@ static int leaks_show(struct seq_file *m, void *p)
2867     * whole processing.
2868     */
2869     do {
2870     - set_store_user_clean(cachep);
2871     drain_cpu_caches(cachep);
2872     + /*
2873     + * drain_cpu_caches() could make kmemleak_object and
2874     + * debug_objects_cache dirty, so reset afterwards.
2875     + */
2876     + set_store_user_clean(cachep);
2877    
2878     x[1] = 0;
2879    
2880     diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
2881     index 3cf0764d5793..bd4978ce8c45 100644
2882     --- a/net/bluetooth/hci_conn.c
2883     +++ b/net/bluetooth/hci_conn.c
2884     @@ -1276,14 +1276,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
2885     !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2886     return 0;
2887    
2888     - /* The minimum encryption key size needs to be enforced by the
2889     - * host stack before establishing any L2CAP connections. The
2890     - * specification in theory allows a minimum of 1, but to align
2891     - * BR/EDR and LE transports, a minimum of 7 is chosen.
2892     - */
2893     - if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
2894     - return 0;
2895     -
2896     return 1;
2897     }
2898    
2899     diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
2900     index 1601275efe2d..4c2ef42e189c 100644
2901     --- a/net/netfilter/nf_conntrack_h323_asn1.c
2902     +++ b/net/netfilter/nf_conntrack_h323_asn1.c
2903     @@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
2904     if (bits % BITS_PER_BYTE > 0)
2905     bytes++;
2906    
2907     - if (*bs->cur + bytes > *bs->end)
2908     + if (bs->cur + bytes > bs->end)
2909     return 1;
2910    
2911     return 0;
2912     diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
2913     index e1537ace2b90..5df7486bb416 100644
2914     --- a/net/netfilter/nf_flow_table_core.c
2915     +++ b/net/netfilter/nf_flow_table_core.c
2916     @@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = {
2917    
2918     int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
2919     {
2920     - flow->timeout = (u32)jiffies;
2921     + int err;
2922    
2923     - rhashtable_insert_fast(&flow_table->rhashtable,
2924     - &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
2925     - nf_flow_offload_rhash_params);
2926     - rhashtable_insert_fast(&flow_table->rhashtable,
2927     - &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
2928     - nf_flow_offload_rhash_params);
2929     + err = rhashtable_insert_fast(&flow_table->rhashtable,
2930     + &flow->tuplehash[0].node,
2931     + nf_flow_offload_rhash_params);
2932     + if (err < 0)
2933     + return err;
2934     +
2935     + err = rhashtable_insert_fast(&flow_table->rhashtable,
2936     + &flow->tuplehash[1].node,
2937     + nf_flow_offload_rhash_params);
2938     + if (err < 0) {
2939     + rhashtable_remove_fast(&flow_table->rhashtable,
2940     + &flow->tuplehash[0].node,
2941     + nf_flow_offload_rhash_params);
2942     + return err;
2943     + }
2944     +
2945     + flow->timeout = (u32)jiffies;
2946     return 0;
2947     }
2948     EXPORT_SYMBOL_GPL(flow_offload_add);
2949     diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
2950     index 15ed91309992..129e9ec99ec9 100644
2951     --- a/net/netfilter/nf_flow_table_ip.c
2952     +++ b/net/netfilter/nf_flow_table_ip.c
2953     @@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
2954     iph->protocol != IPPROTO_UDP)
2955     return -1;
2956    
2957     + if (iph->ttl <= 1)
2958     + return -1;
2959     +
2960     thoff = iph->ihl * 4;
2961     if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
2962     return -1;
2963     @@ -412,6 +415,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
2964     ip6h->nexthdr != IPPROTO_UDP)
2965     return -1;
2966    
2967     + if (ip6h->hop_limit <= 1)
2968     + return -1;
2969     +
2970     thoff = sizeof(*ip6h);
2971     if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
2972     return -1;
2973     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2974     index ebfcfe1dcbdb..29ff59dd99ac 100644
2975     --- a/net/netfilter/nf_tables_api.c
2976     +++ b/net/netfilter/nf_tables_api.c
2977     @@ -1142,6 +1142,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
2978     u64 pkts, bytes;
2979     int cpu;
2980    
2981     + if (!stats)
2982     + return 0;
2983     +
2984     memset(&total, 0, sizeof(total));
2985     for_each_possible_cpu(cpu) {
2986     cpu_stats = per_cpu_ptr(stats, cpu);
2987     @@ -1199,6 +1202,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
2988     if (nft_is_base_chain(chain)) {
2989     const struct nft_base_chain *basechain = nft_base_chain(chain);
2990     const struct nf_hook_ops *ops = &basechain->ops;
2991     + struct nft_stats __percpu *stats;
2992     struct nlattr *nest;
2993    
2994     nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
2995     @@ -1220,8 +1224,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
2996     if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
2997     goto nla_put_failure;
2998    
2999     - if (rcu_access_pointer(basechain->stats) &&
3000     - nft_dump_stats(skb, rcu_dereference(basechain->stats)))
3001     + stats = rcu_dereference_check(basechain->stats,
3002     + lockdep_commit_lock_is_held(net));
3003     + if (nft_dump_stats(skb, stats))
3004     goto nla_put_failure;
3005     }
3006    
3007     diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
3008     index 436cc14cfc59..7f85af4c40ff 100644
3009     --- a/net/netfilter/nft_flow_offload.c
3010     +++ b/net/netfilter/nft_flow_offload.c
3011     @@ -113,6 +113,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
3012     if (ret < 0)
3013     goto err_flow_add;
3014    
3015     + dst_release(route.tuple[!dir].dst);
3016     return;
3017    
3018     err_flow_add:
3019     diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
3020     index 24d90abfc64d..da31aa8e216e 100644
3021     --- a/sound/core/seq/seq_ports.c
3022     +++ b/sound/core/seq/seq_ports.c
3023     @@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
3024     list_del_init(list);
3025     grp->exclusive = 0;
3026     write_unlock_irq(&grp->list_lock);
3027     - up_write(&grp->list_mutex);
3028    
3029     if (!empty)
3030     unsubscribe_port(client, port, grp, &subs->info, ack);
3031     + up_write(&grp->list_mutex);
3032     }
3033    
3034     /* connect two ports */
3035     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3036     index 9bc8a7cb40ea..45bf89ed31de 100644
3037     --- a/sound/pci/hda/hda_intel.c
3038     +++ b/sound/pci/hda/hda_intel.c
3039     @@ -1883,9 +1883,6 @@ static int azx_first_init(struct azx *chip)
3040     chip->msi = 0;
3041     }
3042    
3043     - if (azx_acquire_irq(chip, 0) < 0)
3044     - return -EBUSY;
3045     -
3046     pci_set_master(pci);
3047     synchronize_irq(bus->irq);
3048    
3049     @@ -2000,6 +1997,9 @@ static int azx_first_init(struct azx *chip)
3050     return -ENODEV;
3051     }
3052    
3053     + if (azx_acquire_irq(chip, 0) < 0)
3054     + return -EBUSY;
3055     +
3056     strcpy(card->driver, "HDA-Intel");
3057     strlcpy(card->shortname, driver_short_names[chip->driver_type],
3058     sizeof(card->shortname));
3059     diff --git a/tools/objtool/check.c b/tools/objtool/check.c
3060     index 46be34576620..02a47e365e52 100644
3061     --- a/tools/objtool/check.c
3062     +++ b/tools/objtool/check.c
3063     @@ -28,6 +28,8 @@
3064     #include <linux/hashtable.h>
3065     #include <linux/kernel.h>
3066    
3067     +#define FAKE_JUMP_OFFSET -1
3068     +
3069     struct alternative {
3070     struct list_head list;
3071     struct instruction *insn;
3072     @@ -501,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
3073     insn->type != INSN_JUMP_UNCONDITIONAL)
3074     continue;
3075    
3076     - if (insn->ignore)
3077     + if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
3078     continue;
3079    
3080     rela = find_rela_by_dest_range(insn->sec, insn->offset,
3081     @@ -670,10 +672,10 @@ static int handle_group_alt(struct objtool_file *file,
3082     clear_insn_state(&fake_jump->state);
3083    
3084     fake_jump->sec = special_alt->new_sec;
3085     - fake_jump->offset = -1;
3086     + fake_jump->offset = FAKE_JUMP_OFFSET;
3087     fake_jump->type = INSN_JUMP_UNCONDITIONAL;
3088     fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
3089     - fake_jump->ignore = true;
3090     + fake_jump->func = orig_insn->func;
3091     }
3092    
3093     if (!special_alt->new_len) {